diff --git a/.gitattributes b/.gitattributes index 24d63fe05dd5..5153c0907c9e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5,8 +5,6 @@ scripts/unittest eol=lf *.groovy eol=lf *.csv binary *.json eol=lf -Documentation/Books/SummaryBlacklist.txt eol=lf -Documentation/Examples/*.generated merge=ours VERSION merge=ours STARTER_REV merge=ours lib/V8/v8-json.cpp merge=ours diff --git a/CHANGELOG b/CHANGELOG index a85b4392034d..aebd26009540 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,12 @@ devel ----- +* removed content from Documentation/Books, but keeping the subfolders. + The documentation is in a separate repository (except DocuBlocks and Scripts): + https://github.com/arangodb/docs.git + +* TOKENS function updated to deal with primitive types and arrays + * MinReplicationFactor: Collections can now be created with a minimal replication factor (minReplicationFactor) default 1. If minReplicationFactor > 1 a collection will go into "read-only" mode as soon as it has less then minReplicationFactor diff --git a/CMakeLists.txt b/CMakeLists.txt index 4d6de8908483..3d11f79f43ca 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -296,18 +296,18 @@ set(ARANGODB_PACKAGE_VENDOR "ArangoDB GmbH") set(ARANGODB_PACKAGE_CONTACT "info@arangodb.com") set(ARANGODB_DISPLAY_NAME "ArangoDB") set(ARANGODB_URL_INFO_ABOUT "https://www.arangodb.com") -set(ARANGODB_HELP_LINK "https://docs.arangodb.com/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/") +set(ARANGODB_HELP_LINK "https://www.arangodb.com/docs/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/") set(ARANGODB_CONTACT "hackers@arangodb.com") set(ARANGODB_FRIENDLY_STRING "ArangoDB - the native multi-model NoSQL database") # MSVC -set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program") -set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export") -set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer") -set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - datae xporter") -set(ARANGO_IMPORT_FRIENDLY_STRING "arangoimport - TSV/CSV/JSON importer") -set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client") -set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer") +set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test tool") +set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - dump data and configuration") +set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - restore data and configuration") +set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - data exporter") +set(ARANGO_IMPORT_FRIENDLY_STRING "arangoimport - data importer") +set(ARANGOSH_FRIENDLY_STRING "arangosh - command-line client") +set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - VelocyPack pretty-printer") # libraries set(LIB_ARANGO arango) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bf74ab7d90ad..f65590aa1d87 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -33,12 +33,8 @@ Contributing features, documentation, tests commit message so the issues will get updated automatically with comments. * If the modifications change any documented behavior or add new features, - document the changes. The documentation can be found in arangod/Documentation - directory. To recreate the documentation locally, run make doxygen. This will - re-create all documentation files in the Doxygen directory in your - repository. You can inspect the documentation in this folder using a text - editor or a browser. We recently agreed that future documentation should be - written in American English (AE). + document the changes. It should be written in American English. 
+ The documentation can be found at https://github.com/arangodb/docs * When done, run the complete test suite and make sure all tests pass. You can check [README_maintainers.md](README_maintainers.md) for test run instructions. diff --git a/Documentation/Books/.gitignore b/Documentation/Books/.gitignore deleted file mode 100644 index 6be6cbffe0ab..000000000000 --- a/Documentation/Books/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*/manual.epub -*/manual.mobi -*/manual.pdf -*/node_modules/ -books -repos -ppbooks -allComments.txt diff --git a/Documentation/Books/AQL/.gitkeep b/Documentation/Books/AQL/.gitkeep new file mode 100644 index 000000000000..936ca3adc4e3 --- /dev/null +++ b/Documentation/Books/AQL/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the directory is kept. + +Some of the old documentation building scripts are still +used by the new system which copy files into this folder. \ No newline at end of file diff --git a/Documentation/Books/AQL/Advanced/ArrayOperators.md b/Documentation/Books/AQL/Advanced/ArrayOperators.md deleted file mode 100644 index 5a8f61f5a4ec..000000000000 --- a/Documentation/Books/AQL/Advanced/ArrayOperators.md +++ /dev/null @@ -1,302 +0,0 @@ -Array Operators -=============== - - -Array expansion ---------------- - -In order to access a named attribute from all elements in an array easily, AQL -offers the shortcut operator [\*] for array variable expansion. - -Using the [\*] operator with an array variable will iterate over all elements -in the array, thus allowing to access a particular attribute of each element. It is -required that the expanded variable is an array. The result of the [\*] -operator is again an array. - -To demonstrate the array expansion operator, let's go on with the following three -example *users* documents: - -```json -[ - { - name: "john", - age: 35, - friends: [ - { name: "tina", age: 43 }, - { name: "helga", age: 52 }, - { name: "alfred", age: 34 } - ] - }, - { - name: "yves", - age: 24, - friends: [ - { name: "sergei", age: 27 }, - { name: "tiffany", age: 25 } - ] - }, - { - name: "sandra", - age: 40, - friends: [ - { name: "bob", age: 32 }, - { name: "elena", age: 48 } - ] - } -] -``` - -With the [\*] operator it becomes easy to query just the names of the -friends for each user: - -``` -FOR u IN users - RETURN { name: u.name, friends: u.friends[*].name } -``` - -This will produce: - -```json -[ - { "name" : "john", "friends" : [ "tina", "helga", "alfred" ] }, - { "name" : "yves", "friends" : [ "sergei", "tiffany" ] }, - { "name" : "sandra", "friends" : [ "bob", "elena" ] } -] -``` - -This is a shortcut for the longer, semantically equivalent query: - -```js -FOR u IN users - RETURN { name: u.name, friends: (FOR f IN u.friends RETURN f.name) } -``` - -Array contraction ------------------ - -In order to collapse (or flatten) results in nested arrays, AQL provides the [\*\*] -operator. It works similar to the [\*] operator, but additionally collapses nested -arrays. - -How many levels are collapsed is determined by the amount of asterisk characters used. -[\*\*] collapses one level of nesting - just like `FLATTEN(array)` or `FLATTEN(array, 1)` -would do -, [\*\*\*] collapses two levels - the equivalent to `FLATTEN(array, 2)` - and -so on. - -Let's compare the array expansion operator with an array contraction operator. 
-For example, the following query produces an array of friend names per user: - -```js -FOR u IN users - RETURN u.friends[*].name -``` - -As we have multiple users, the overall result is a nested array: - -```json -[ - [ - "tina", - "helga", - "alfred" - ], - [ - "sergei", - "tiffany" - ], - [ - "bob", - "elena" - ] -] -``` - -If the goal is to get rid of the nested array, we can apply the [\*\*] operator on the -result. But simply appending [\*\*] to the query won't help, because *u.friends* -is not a nested (multi-dimensional) array, but a simple (one-dimensional) array. Still, -the [\*\*] can be used if it has access to a multi-dimensional nested result. - -We can extend above query as follows and still create the same nested result: - -```js -RETURN ( - FOR u IN users RETURN u.friends[*].name -) -``` - -By now appending the [\*\*] operator at the end of the query... - -```js -RETURN ( - FOR u IN users RETURN u.friends[*].name -)[**] -``` - -... the query result becomes: - -```json -[ - [ - "tina", - "helga", - "alfred", - "sergei", - "tiffany", - "bob", - "elena" - ] -] -``` - -Note that the elements are not de-duplicated. For a flat array with only unique -elements, a combination of [UNIQUE()](../Functions/Array.md#unique) and -[FLATTEN()](../Functions/Array.md#flatten) is advisable. - -Inline expressions ------------------- - -It is possible to filter elements while iterating over an array, to limit the amount -of returned elements and to create a projection using the current array element. -Sorting is not supported by this shorthand form. - -These inline expressions can follow array expansion and contraction operators -[\* ...], [\*\* ...] etc. The keywords *FILTER*, *LIMIT* and *RETURN* -must occur in this order if they are used in combination, and can only occur once: - -`anyArray[* FILTER conditions LIMIT skip,limit RETURN projection]` - -Example with nested numbers and array contraction: - -```js -LET arr = [ [ 1, 2 ], 3, [ 4, 5 ], 6 ] -RETURN arr[** FILTER CURRENT % 2 == 0] -``` - -All even numbers are returned in a flat array: - -```json -[ - [ 2, 4, 6 ] -] -``` - -Complex example with multiple conditions, limit and projection: - -```js -FOR u IN users - RETURN { - name: u.name, - friends: u.friends[* FILTER CONTAINS(CURRENT.name, "a") AND CURRENT.age > 40 - LIMIT 2 - RETURN CONCAT(CURRENT.name, " is ", CURRENT.age) - ] - } -``` - -No more than two computed strings based on *friends* with an `a` in their name and -older than 40 years are returned per user: - -```json -[ - { - "name": "john", - "friends": [ - "tina is 43", - "helga is 52" - ] - }, - { - "name": "sandra", - "friends": [ - "elena is 48" - ] - }, - { - "name": "yves", - "friends": [] - } -] -``` - -### Inline filter - -To return only the names of friends that have an *age* value -higher than the user herself, an inline *FILTER* can be used: - -```js -FOR u IN users - RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name } -``` - -The pseudo-variable *CURRENT* can be used to access the current array element. -The *FILTER* condition can refer to *CURRENT* or any variables valid in the -outer scope. - -### Inline limit - -The number of elements returned can be restricted with *LIMIT*. It works the same -as the [limit operation](../Operations/Limit.md). *LIMIT* must come after *FILTER* -and before *RETURN*, if they are present. 
- -```js -FOR u IN users - RETURN { name: u.name, friends: u.friends[* LIMIT 1].name } -``` - -Above example returns one friend each: - -```json -[ - { "name": "john", "friends": [ "tina" ] }, - { "name": "sandra", "friends": [ "bob" ] }, - { "name": "yves", "friends": [ "sergei" ] } -] -``` - -A number of elements can also be skipped and up to *n* returned: - -```js -FOR u IN users - RETURN { name: u.name, friends: u.friends[* LIMIT 1,2].name } -``` - -The example query skips the first friend and returns two friends at most -per user: - -```json -[ - { "name": "john", "friends": [ "helga", "alfred" ] }, - { "name": "sandra", "friends": [ "elena" ] }, - { "name": "yves", "friends": [ "tiffany" ] } -] -``` - -### Inline projection - -To return a projection of the current element, use *RETURN*. If a *FILTER* is -also present, *RETURN* must come later. - -```js -FOR u IN users - RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)] -``` - -The above will return: - -```json -[ - [ - "tina is a friend of john", - "helga is a friend of john", - "alfred is a friend of john" - ], - [ - "sergei is a friend of yves", - "tiffany is a friend of yves" - ], - [ - "bob is a friend of sandra", - "elena is a friend of sandra" - ] -] -``` \ No newline at end of file diff --git a/Documentation/Books/AQL/Advanced/README.md b/Documentation/Books/AQL/Advanced/README.md deleted file mode 100644 index fc81c451b1df..000000000000 --- a/Documentation/Books/AQL/Advanced/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Advanced features -================= - -This section covers additional, powerful AQL features, which you may wanna look -into once you made yourself familiar with the basics of the query language. - -- [Array operators](ArrayOperators.md): Shorthands for array manipulation diff --git a/Documentation/Books/AQL/CommonErrors.md b/Documentation/Books/AQL/CommonErrors.md deleted file mode 100644 index 7c9c8966899f..000000000000 --- a/Documentation/Books/AQL/CommonErrors.md +++ /dev/null @@ -1,118 +0,0 @@ -Common Errors -============= - -Trailing semicolons in query strings ------------------------------------- - -Many SQL databases allow sending multiple queries at once. In this case, multiple -queries are seperated using the semicolon character. Often it is also supported to -execute a single query that has a semicolon at its end. - -AQL does not support this, and it is a parse error to use a semicolon at the end -of an AQL query string. - - -String concatenation --------------------- - -In AQL, strings must be concatenated using the [CONCAT()](Functions/String.md#concat) -function. Joining them together with the `+` operator is not supported. Especially -as JavaScript programmer it is easy to walk into this trap: - -```js -RETURN "foo" + "bar" // [ 0 ] -RETURN "foo" + 123 // [ 123 ] -RETURN "123" + 200 // [ 323 ] -``` - -The arithmetic plus operator expects numbers as operands, and will try to implicitly -cast them to numbers if they are of different type. `"foo"` and `"bar"` are casted -to `0` and then added to together (still zero). If an actual number is added, that -number will be returned (adding zero doesn't change the result). If the string is a -valid string representation of a number, then it is casted to a number. Thus, adding -`"123"` and `200` results in two numbers being added up to `323`. 
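The same casting rules mean that two numeric strings are added as numbers. A small illustration, derived from the rules above rather than an exhaustive list:

```js
RETURN "5" + "5"    // [ 10 ]
RETURN "foo" + "5"  // [ 5 ]
```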
- -To concatenate elements (with implicit casting to string for non-string values), do: - -```js -RETURN CONCAT("foo", "bar") // [ "foobar" ] -RETURN CONCAT("foo", 123) // [ "foo123" ] -RETURN CONCAT("123", 200) // [ "123200" ] -``` - -Unexpected long running queries -------------------------------- - -Slow queries can have various reasons and be legitimate for queries with a high -computational complexity or if they touch a lot of data. Use the *Explain* -feature to inspect execution plans and verify that appropriate indexes are -utilized. Also check for mistakes such as references to the wrong variables. - -A literal collection name, which is not part of constructs like `FOR`, -`UPDATE ... IN` etc., stands for an array of all documents of that collection -and can cause an entire collection to be materialized before further -processing. It should thus be avoided. - -Check the execution plan for `/* all collection documents */` and verify that -it is intended. You should also see a warning if you execute such a query: - -> collection 'coll' used as expression operand - -For example, instead of: - -```js -RETURN coll[* LIMIT 1] -``` - -... with the execution plan ... - -``` -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 2 CalculationNode 1 - LET #2 = coll /* all collection documents */[* LIMIT 0, 1] /* v8 expression */ - 3 ReturnNode 1 - RETURN #2 -``` - -... you can use the following equivalent query: - -```js -FOR doc IN coll - LIMIT 1 - RETURN doc -``` - -... with the (better) execution plan: - -``` -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 2 EnumerateCollectionNode 44 - FOR doc IN Characters /* full collection scan */ - 3 LimitNode 1 - LIMIT 0, 1 - 4 ReturnNode 1 - RETURN doc -``` - -Similarly, make sure you have not confused any variable names with collection -names by accident: - -```js -LET names = ["John", "Mary", ...] -// supposed to refer to variable "names", not collection "Names" -FOR name IN Names - ... -``` - - diff --git a/Documentation/Books/AQL/DataQueries.md b/Documentation/Books/AQL/DataQueries.md deleted file mode 100644 index 21e2d7de6660..000000000000 --- a/Documentation/Books/AQL/DataQueries.md +++ /dev/null @@ -1,333 +0,0 @@ -Data Queries -============ - -Data Access Queries -------------------- - -Retrieving data from the database with AQL does always include a **RETURN** -operation. It can be used to return a static value, such as a string: - -```js -RETURN "Hello ArangoDB!" -``` - -The query result is always an array of elements, even if a single element was -returned and contains a single element in that case: `["Hello ArangoDB!"]` - -The function `DOCUMENT()` can be called to retrieve a single document via -its document handle, for instance: - -```js -RETURN DOCUMENT("users/phil") -``` - -*RETURN* is usually accompanied by a **FOR** loop to iterate over the -documents of a collection. The following query executes the loop body for all -documents of a collection called *users*. Each document is returned unchanged -in this example: - -```js -FOR doc IN users - RETURN doc -``` - -Instead of returning the raw `doc`, one can easily create a projection: - -```js -FOR doc IN users - RETURN { user: doc, newAttribute: true } -``` - -For every user document, an object with two attributes is returned. The value -of the attribute *user* is set to the content of the user document, and -*newAttribute* is a static attribute with the boolean value *true*. 
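A projection can also pick out individual attributes instead of embedding the whole document. A minimal sketch, assuming the *users* documents carry *name* and *age* attributes:

```js
FOR doc IN users
  RETURN { name: doc.name, age: doc.age }
```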
- -Operations like **FILTER**, **SORT** and **LIMIT** can be added to the loop body -to narrow and order the result. Instead of above shown call to `DOCUMENT()`, -one can also retrieve the document that describes user *phil* like so: - -```js -FOR doc IN users - FILTER doc._key == "phil" - RETURN doc -``` - -The document key is used in this example, but any other attribute could equally -be used for filtering. Since the document key is guaranteed to be unique, no -more than a single document will match this filter. For other attributes this -may not be the case. To return a subset of active users (determined by an -attribute called *status*), sorted by name in ascending order, you can do: - -```js -FOR doc IN users - FILTER doc.status == "active" - SORT doc.name - LIMIT 10 -``` - -Note that operations do not have to occur in a fixed order and that their order -can influence the result significantly. Limiting the number of documents -before a filter is usually not what you want, because it easily misses a lot -of documents that would fulfill the filter criterion, but are ignored because -of a premature *LIMIT* clause. Because of the aforementioned reasons, *LIMIT* -is usually put at the very end, after *FILTER*, *SORT* and other operations. - -See the [High Level Operations](Operations/README.md) chapter for more details. - -Data Modification Queries -------------------------- - -AQL supports the following data-modification operations: - -- **INSERT**: insert new documents into a collection -- **UPDATE**: partially update existing documents in a collection -- **REPLACE**: completely replace existing documents in a collection -- **REMOVE**: remove existing documents from a collection -- **UPSERT**: conditionally insert or update documents in a collection - -Below you find some simple example queries that use these operations. -The operations are detailed in the chapter [High Level Operations](Operations/README.md). - - -### Modifying a single document - -Let's start with the basics: `INSERT`, `UPDATE` and `REMOVE` operations on single documents. -Here is an example that insert a document in an existing collection *users*: - -```js -INSERT { - firstName: "Anna", - name: "Pavlova", - profession: "artist" -} IN users -``` - -You may provide a key for the new document; if not provided, ArangoDB will create one for you. - -```js -INSERT { - _key: "GilbertoGil", - firstName: "Gilberto", - name: "Gil", - city: "Fortalezza" -} IN users -``` - -As ArangoDB is schema-free, attributes of the documents may vary: - -```js -INSERT { - _key: "PhilCarpenter", - firstName: "Phil", - name: "Carpenter", - middleName: "G.", - status: "inactive" -} IN users -``` - -```js -INSERT { - _key: "NatachaDeclerck", - firstName: "Natacha", - name: "Declerck", - location: "Antwerp" -} IN users -``` - -Update is quite simple. The following AQL statement will add or change the attributes status and location - -```js -UPDATE "PhilCarpenter" WITH { - status: "active", - location: "Beijing" -} IN users -``` - -Replace is an alternative to update where all attributes of the document are replaced. 
- -```js -REPLACE { - _key: "NatachaDeclerck", - firstName: "Natacha", - name: "Leclerc", - status: "active", - level: "premium" -} IN users -``` - -Removing a document if you know its key is simple as well : - -```js -REMOVE "GilbertoGil" IN users -``` - -or - -```js -REMOVE { _key: "GilbertoGil" } IN users -``` - -### Modifying multiple documents - -Data-modification operations are normally combined with *FOR* loops to -iterate over a given list of documents. They can optionally be combined with -*FILTER* statements and the like. - -Let's start with an example that modifies existing documents in a collection -*users* that match some condition: - -```js -FOR u IN users - FILTER u.status == "not active" - UPDATE u WITH { status: "inactive" } IN users -``` - - -Now, let's copy the contents of the collection *users* into the collection -*backup*: - -```js -FOR u IN users - INSERT u IN backup -``` - -Subsequently, let's find some documents in collection *users* and remove them -from collection *backup*. The link between the documents in both collections is -established via the documents' keys: - -```js -FOR u IN users - FILTER u.status == "deleted" - REMOVE u IN backup -``` - -The following example will remove all documents from both *users* and *backup*: - -```js -LET r1 = (FOR u IN users REMOVE u IN users) -LET r2 = (FOR u IN backup REMOVE u IN backup) -RETURN true -``` - -### Returning documents - -Data-modification queries can optionally return documents. In order to reference -the inserted, removed or modified documents in a `RETURN` statement, data-modification -statements introduce the `OLD` and/or `NEW` pseudo-values: - -```js -FOR i IN 1..100 - INSERT { value: i } IN test - RETURN NEW -``` - -```js -FOR u IN users - FILTER u.status == "deleted" - REMOVE u IN users - RETURN OLD -``` - -```js -FOR u IN users - FILTER u.status == "not active" - UPDATE u WITH { status: "inactive" } IN users - RETURN NEW -``` - -`NEW` refers to the inserted or modified document revision, and `OLD` refers -to the document revision before update or removal. `INSERT` statements can -only refer to the `NEW` pseudo-value, and `REMOVE` operations only to `OLD`. -`UPDATE`, `REPLACE` and `UPSERT` can refer to either. - -In all cases the full documents will be returned with all their attributes, -including the potentially auto-generated attributes such as `_id`, `_key`, or `_rev` -and the attributes not specified in the update expression of a partial update. - -#### Projections - -It is possible to return a projection of the documents in `OLD` or `NEW` instead of -returning the entire documents. This can be used to reduce the amount of data returned -by queries. - -For example, the following query will return only the keys of the inserted documents: - -```js -FOR i IN 1..100 - INSERT { value: i } IN test - RETURN NEW._key -``` - -#### Using OLD and NEW in the same query - -For `UPDATE`, `REPLACE` and `UPSERT` statements, both `OLD` and `NEW` can be used -to return the previous revision of a document together with the updated revision: - -```js -FOR u IN users - FILTER u.status == "not active" - UPDATE u WITH { status: "inactive" } IN users - RETURN { old: OLD, new: NEW } -``` - -#### Calculations with OLD or NEW - -It is also possible to run additional calculations with `LET` statements between the -data-modification part and the final `RETURN` of an AQL query. For example, the following -query performs an upsert operation and returns whether an existing document was -updated, or a new document was inserted. 
It does so by checking the `OLD` variable -after the `UPSERT` and using a `LET` statement to store a temporary string for -the operation type: - -```js -UPSERT { name: "test" } - INSERT { name: "test" } - UPDATE { } IN users -LET opType = IS_NULL(OLD) ? "insert" : "update" -RETURN { _key: NEW._key, type: opType } -``` - -### Restrictions - -The name of the modified collection (*users* and *backup* in the above cases) -must be known to the AQL executor at query-compile time and cannot change at -runtime. Using a bind parameter to specify the -[collection name](../Manual/Appendix/Glossary.html#collection-name) is allowed. - -It is not possible to use multiple data-modification operations for the same -collection in the same query, or follow up a data-modification operation for a -specific collection with a read operation for the same collection. Neither is -it possible to follow up any data-modification operation with a traversal query -(which may read from arbitrary collections not necessarily known at the start of -the traversal). - -That means you may not place several `REMOVE` or `UPDATE` statements for the same -collection into the same query. It is however possible to modify different collections -by using multiple data-modification operations for different collections in the -same query. -In case you have a query with several places that need to remove documents from the -same collection, it is recommended to collect these documents or their keys in an array -and have the documents from that array removed using a single `REMOVE` operation. - -Data-modification operations can optionally be followed by `LET` operations to -perform further calculations and a `RETURN` operation to return data. - - -### Transactional Execution - -On a single server, data-modification operations are executed transactionally. -If a data-modification operation fails, any changes made by it will be rolled -back automatically as if they never happened. - -If the RocksDB engine is used and intermediate commits are enabled, a query may -execute intermediate transaction commits in case the running transaction (AQL -query) hits the specified size thresholds. In this case, the query's operations -carried out so far will be committed and not rolled back in case of a later abort/rollback. -That behavior can be controlled by adjusting the intermediate commit settings for -the RocksDB engine. - -In a cluster, AQL data-modification queries are currently not executed transactionally. -Additionally, *update*, *replace*, *upsert* and *remove* AQL queries currently -require the *_key* attribute to be specified for all documents that should be -modified or removed, even if a shared key attribute other than *_key* was chosen -for the collection. This restriction may be overcome in a future release of ArangoDB. diff --git a/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md b/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md deleted file mode 100644 index 68d0690441ab..000000000000 --- a/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md +++ /dev/null @@ -1,99 +0,0 @@ -Combining Graph Traversals -========================== - -Finding the start vertex via a geo query ----------------------------------------- - -Our first example will locate the start vertex for a graph traversal via [a geo index](../../Manual/Indexing/Geo.html). 
-We use [the city graph](../../Manual/Graphs/index.html#the-city-graph) and its geo indices: - -![Cities Example Graph](../../Manual/Graphs/cities_graph.png) - - @startDocuBlockInline COMBINING_GRAPH_01_create_graph - @EXAMPLE_ARANGOSH_OUTPUT{COMBINING_GRAPH_01_create_graph} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("routeplanner"); - ~examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock COMBINING_GRAPH_01_create_graph - -We search all german cities in a range of 400 km around the ex-capital **Bonn**: **Hamburg** and **Cologne**. -We won't find **Paris** since its in the `frenchCity` collection. - - @startDocuBlockInline COMBINING_GRAPH_02_show_geo - @EXAMPLE_AQL{COMBINING_GRAPH_02_show_geo} - @DATASET{routeplanner} - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - RETURN startCity._key - @BV { - bonn: [7.0998, 50.7340], - radius: 400000 - } - @END_EXAMPLE_AQL - @endDocuBlock COMBINING_GRAPH_02_show_geo - -Lets revalidate that the geo indices are actually used: - - @startDocuBlockInline COMBINING_GRAPH_03_explain_geo - @EXAMPLE_AQL{COMBINING_GRAPH_03_explain_geo} - @DATASET{routeplanner} - @EXPLAIN{TRUE} - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - RETURN startCity._key - @BV { - bonn: [7.0998, 50.7340], - radius: 400000 - } - @END_EXAMPLE_AQL - @endDocuBlock COMBINING_GRAPH_03_explain_geo - -And now combine this with a graph traversal: - - @startDocuBlockInline COMBINING_GRAPH_04_combine - @EXAMPLE_AQL{COMBINING_GRAPH_04_combine} - @DATASET{routeplanner} - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - FOR v, e, p IN 1..1 OUTBOUND startCity - GRAPH 'routeplanner' - RETURN {startcity: startCity._key, traversedCity: v._key} - @BV { - bonn: [7.0998, 50.7340], - radius: 400000 - } - @END_EXAMPLE_AQL - @endDocuBlock COMBINING_GRAPH_04_combine - -The geo index query returns us `startCity` (**Cologne** and **Hamburg**) which we then use as starting point for our graph traversal. -For simplicity we only return their direct neighbours. We format the return result so we can see from which `startCity` the traversal came. 
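Written as a plain query outside the documentation tooling, with the bind parameters for **Bonn** and the 400 km radius inlined, the combined statement from above looks roughly like this:

```js
FOR startCity IN germanCity
  FILTER GEO_DISTANCE([7.0998, 50.7340], startCity.geometry) < 400000
  FOR v, e, p IN 1..1 OUTBOUND startCity
    GRAPH 'routeplanner'
    RETURN { startcity: startCity._key, traversedCity: v._key }
```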
- -Alternatively we could use a `LET` statement with a subquery to group the traversals by their `startCity` efficiently: - - @startDocuBlockInline COMBINING_GRAPH_05_combine_let - @EXAMPLE_AQL{COMBINING_GRAPH_05_combine_let} - @DATASET{routeplanner} - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - LET oneCity = ( - FOR v, e, p IN 1..1 OUTBOUND startCity - GRAPH 'routeplanner' RETURN v._key - ) - RETURN {startCity: startCity._key, connectedCities: oneCity} - @BV { - bonn: [7.0998, 50.7340], - radius: 400000 - } - @END_EXAMPLE_AQL - @endDocuBlock COMBINING_GRAPH_05_combine_let - -Finally, we clean up again: - - @startDocuBlockInline COMBINING_GRAPH_06_cleanup - @EXAMPLE_ARANGOSH_OUTPUT{COMBINING_GRAPH_06_cleanup} - ~var examples = require("@arangodb/graph-examples/example-graph.js"); - ~var g = examples.loadGraph("routeplanner"); - examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock COMBINING_GRAPH_06_cleanup diff --git a/Documentation/Books/AQL/Examples/CombiningQueries.md b/Documentation/Books/AQL/Examples/CombiningQueries.md deleted file mode 100644 index ff0dec8beda0..000000000000 --- a/Documentation/Books/AQL/Examples/CombiningQueries.md +++ /dev/null @@ -1,60 +0,0 @@ -Combining queries -================= - -Subqueries ----------- - -Wherever an expression is allowed in AQL, a subquery can be placed. A subquery -is a query part that can introduce its own local variables without affecting -variables and values in its outer scope(s). - -It is required that subqueries be put inside parentheses *(* and *)* to -explicitly mark their start and end points: - -```js -FOR p IN persons - LET recommendations = ( - FOR r IN recommendations - FILTER p.id == r.personId - SORT p.rank DESC - LIMIT 10 - RETURN r - ) - RETURN { person : p, recommendations : recommendations } -``` - -```js -FOR p IN persons - COLLECT city = p.city INTO g - RETURN { - city : city, - numPersons : LENGTH(g), - maxRating: MAX( - FOR r IN g - RETURN r.p.rating - )} -``` - -Subqueries may also include other subqueries. - -Note that subqueries always return a result **array**, even if there is only -a single return value: - -```js -RETURN ( RETURN 1 ) -``` - -```json -[ [ 1 ] ] -``` - -To avoid such a nested data structure, [FIRST()](../Functions/Array.md#first) -can be used for example: - -```js -RETURN FIRST( RETURN 1 ) -``` - -```json -[ 1 ] -``` diff --git a/Documentation/Books/AQL/Examples/Counting.md b/Documentation/Books/AQL/Examples/Counting.md deleted file mode 100644 index 76aa9d7f6130..000000000000 --- a/Documentation/Books/AQL/Examples/Counting.md +++ /dev/null @@ -1,25 +0,0 @@ -Counting -======== - -Amount of documents in a collection ------------------------------------ - -To return the count of documents that currently exist in a collection, -you can call the [LENGTH() function](../Functions/Array.md#length): - -``` -RETURN LENGTH(collection) -``` - -This type of call is optimized since 2.8 (no unnecessary intermediate result -is built up in memory) and it is therefore the preferred way to determine the count. -Internally, [COLLECTION_COUNT()](../Functions/Miscellaneous.md#collectioncount) is called. - -In earlier versions with `COLLECT ... 
WITH COUNT INTO` available (since 2.4), -you may use the following code instead of *LENGTH()* for better performance: - -``` -FOR doc IN collection - COLLECT WITH COUNT INTO length - RETURN length -``` diff --git a/Documentation/Books/AQL/Examples/DataModificationQueries.md b/Documentation/Books/AQL/Examples/DataModificationQueries.md deleted file mode 100644 index 2c03440484cb..000000000000 --- a/Documentation/Books/AQL/Examples/DataModificationQueries.md +++ /dev/null @@ -1,267 +0,0 @@ -Data-modification queries -========================= - -The following operations can be used to modify data of multiple documents -with one query. This is superior to fetching and updating the documents individually -with multiple queries. However, if only a single document needs to be modified, -ArangoDB's specialized data-modification operations for single documents -might execute faster. - -Updating documents ------------------- - -To update existing documents, we can either use the *UPDATE* or the *REPLACE* -operation. *UPDATE* updates only the specified attributes in the found documents, -and *REPLACE* completely replaces the found documents with the specified values. - -We'll start with an *UPDATE* query that rewrites the gender attribute in all -documents: - -```js -FOR u IN users - UPDATE u WITH { gender: TRANSLATE(u.gender, { m: 'male', f: 'female' }) } IN users -``` - -To add new attributes to existing documents, we can also use an *UPDATE* query. -The following query adds an attribute *numberOfLogins* for all users with status -active: - -```js -FOR u IN users - FILTER u.active == true - UPDATE u WITH { numberOfLogins: 0 } IN users -``` - -Existing attributes can also be updated based on their previous value: - -```js -FOR u IN users - FILTER u.active == true - UPDATE u WITH { numberOfLogins: u.numberOfLogins + 1 } IN users -``` - -The above query will only work if there was already a *numberOfLogins* attribute -present in the document. If it is unsure whether there is a *numberOfLogins* -attribute in the document, the increase must be made conditional: - -```js -FOR u IN users - FILTER u.active == true - UPDATE u WITH { - numberOfLogins: HAS(u, 'numberOfLogins') ? u.numberOfLogins + 1 : 1 - } IN users -``` - -Updates of multiple attributes can be combined in a single query: - -```js -FOR u IN users - FILTER u.active == true - UPDATE u WITH { - lastLogin: DATE_NOW(), - numberOfLogins: HAS(u, 'numberOfLogins') ? u.numberOfLogins + 1 : 1 - } IN users -``` - -Note than an update query might fail during execution, for example because a -document to be updated does not exist. In this case, the query will abort at -the first error. In single-server mode, all modifications done by the query will -be rolled back as if they never happened. - - -Replacing documents -------------------- - -To not just partially update, but completely replace existing documents, use -the *REPLACE* operation. -The following query replaces all documents in the collection backup with -the documents found in collection users. Documents common to both -collections will be replaced. All other documents will remain unchanged. -Documents are compared using their *_key* attributes: - -```js -FOR u IN users - REPLACE u IN backup -``` - -The above query will fail if there are documents in collection users that are -not in collection backup yet. In this case, the query would attempt to replace -documents that do not exist. If such case is detected while executing the query, -the query will abort. 
In single-server mode, all changes made by the query will -also be rolled back. - -To make the query succeed for such case, use the *ignoreErrors* query option: - -```js -FOR u IN users - REPLACE u IN backup OPTIONS { ignoreErrors: true } -``` - - -Removing documents ------------------- - -Deleting documents can be achieved with the *REMOVE* operation. -To remove all users within a certain age range, we can use the following query: - -```js -FOR u IN users - FILTER u.active == true && u.age >= 35 && u.age <= 37 - REMOVE u IN users -``` - - -Creating documents ------------------- - -To create new documents, there is the *INSERT* operation. -It can also be used to generate copies of existing documents from other collections, -or to create synthetic documents (e.g. for testing purposes). The following -query creates 1000 test users in collection users with some attributes set: - -```js -FOR i IN 1..1000 - INSERT { - id: 100000 + i, - age: 18 + FLOOR(RAND() * 25), - name: CONCAT('test', TO_STRING(i)), - active: false, - gender: i % 2 == 0 ? 'male' : 'female' - } IN users -``` - - -Copying data from one collection into another ---------------------------------------------- - -To copy data from one collection into another, an *INSERT* operation can be -used: - -```js -FOR u IN users - INSERT u IN backup -``` - -This will copy over all documents from collection users into collection -backup. Note that both collections must already exist when the query is -executed. The query might fail if backup already contains documents, as -executing the insert might attempt to insert the same document (identified -by *_key* attribute) again. This will trigger a unique key constraint violation -and abort the query. In single-server mode, all changes made by the query -will also be rolled back. -To make such copy operation work in all cases, the target collection can -be emptied before, using a *REMOVE* query. - - -Handling errors ---------------- - -In some cases it might be desirable to continue execution of a query even in -the face of errors (e.g. "document not found"). To continue execution of a -query in case of errors, there is the *ignoreErrors* option. - -To use it, place an *OPTIONS* keyword directly after the data modification -part of the query, e.g. - -```js -FOR u IN users - REPLACE u IN backup OPTIONS { ignoreErrors: true } -``` - -This will continue execution of the query even if errors occur during the -*REPLACE* operation. It works similar for *UPDATE*, *INSERT*, and *REMOVE*. - - -Altering substructures ----------------------- - -To modify lists in documents we have to work with temporary variables. -We will collect the sublist in there and alter it. We choose a simple -boolean filter condition to make the query better comprehensible. - -First lets create a collection with a sample: - -```js -database = db._create('complexCollection') -database.save({ - "topLevelAttribute" : "a", - "subList" : [ - { - "attributeToAlter" : "oldValue", - "filterByMe" : true - }, - { - "attributeToAlter" : "moreOldValues", - "filterByMe" : true - }, - { - "attributeToAlter" : "unchangedValue", - "filterByMe" : false - } - ] -}) -``` - -Heres the Query which keeps the *subList* on *alteredList* to update it later: - -```js -FOR document in complexCollection - LET alteredList = ( - FOR element IN document.subList - LET newItem = (! element.filterByMe ? 
- element : - MERGE(element, { attributeToAlter: "shiny New Value" })) - RETURN newItem) - UPDATE document WITH { subList: alteredList } IN complexCollection -``` - -While the query as it is is now functional: - -```js -db.complexCollection.toArray() -[ - { - "_id" : "complexCollection/392671569467", - "_key" : "392671569467", - "_rev" : "392799430203", - "topLevelAttribute" : "a", - "subList" : [ - { - "filterByMe" : true, - "attributeToAlter" : "shiny New Value" - }, - { - "filterByMe" : true, - "attributeToAlter" : "shiny New Value" - }, - { - "filterByMe" : false, - "attributeToAlter" : "unchangedValue" - } - ] - } -] -``` - -It will probably be soonish a performance bottleneck, since it **modifies** -all documents in the collection **regardless whether the values change or not**. -Therefore we want to only *UPDATE* the documents if we really change their value. -Hence we employ a second *FOR* to test whether *subList* will be altered or not: - -```js -FOR document in complexCollection - LET willUpdateDocument = ( - FOR element IN docToAlter.subList - FILTER element.filterByMe LIMIT 1 RETURN 1) - - FILTER LENGTH(willUpdateDocument) > 0 - - LET alteredList = ( - FOR element IN document.subList - LET newItem = (! element.filterByMe ? - element : - MERGE(element, { attributeToAlter: "shiny New Value" })) - RETURN newItem) - - UPDATE document WITH { subList: alteredList } IN complexCollection -``` diff --git a/Documentation/Books/AQL/Examples/Grouping.md b/Documentation/Books/AQL/Examples/Grouping.md deleted file mode 100644 index a42213ad5084..000000000000 --- a/Documentation/Books/AQL/Examples/Grouping.md +++ /dev/null @@ -1,324 +0,0 @@ -Grouping -======== - -To group results by arbitrary criteria, AQL provides the *COLLECT* keyword. -*COLLECT* will perform a grouping, but no aggregation. Aggregation can still be -added in the query if required. - -Ensuring uniqueness -------------------- - -*COLLECT* can be used to make a result set unique. The following query will return each distinct -`age` attribute value only once: - -```js -FOR u IN users - COLLECT age = u.age - RETURN age -``` - -This is grouping without tracking the group values, but just the group criterion (*age*) value. - -Grouping can also be done on multiple levels using *COLLECT*: - -```js -FOR u IN users - COLLECT status = u.status, age = u.age - RETURN { status, age } -``` - - -Alternatively *RETURN DISTINCT* can be used to make a result set unique. *RETURN DISTINCT* supports a -single criterion only: - -```js -FOR u IN users - RETURN DISTINCT u.age -``` - -Note: the order of results is undefined for *RETURN DISTINCT*. - -Fetching group values ---------------------- - -To group users by age, and return the names of the users with the highest ages, -we'll issue a query like this: - -```js -FOR u IN users - FILTER u.active == true - COLLECT age = u.age INTO usersByAge - SORT age DESC LIMIT 0, 5 - RETURN { - age, - users: usersByAge[*].u.name - } -``` - -```json -[ - { "age": 37, "users": [ "John", "Sophia" ] }, - { "age": 36, "users": [ "Fred", "Emma" ] }, - { "age": 34, "users": [ "Madison" ] }, - { "age": 33, "users": [ "Chloe", "Michael" ] }, - { "age": 32, "users": [ "Alexander" ] } -] -``` - -The query will put all users together by their *age* attribute. There will be one -result document per distinct *age* value (let aside the *LIMIT*). For each group, -we have access to the matching document via the *usersByAge* variable introduced in -the *COLLECT* statement. 
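If only parts of the grouped documents are needed later on, the amount of data copied into the group variable can be reduced with a *KEEP* clause. A sketch of the query above, assuming only the user names are required per group:

```js
FOR u IN users
  FILTER u.active == true
  LET name = u.name
  COLLECT age = u.age INTO usersByAge KEEP name
  SORT age DESC LIMIT 0, 5
  RETURN {
    age,
    users: usersByAge[*].name
  }
```

With *KEEP name*, each group member holds only the kept variable, so the expansion accesses *.name* directly instead of *.u.name*.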
- -Variable Expansion ------------------- - -The *usersByAge* variable contains the full documents found, and as we're only -interested in user names, we'll use the expansion operator [\*] to extract just the -*name* attribute of all user documents in each group: - -```js -usersByAge[*].u.name -``` - -The [\*] expansion operator is just a handy short-cut. We could also write -a subquery: - -```js -( FOR temp IN usersByAge RETURN temp.u.name ) -``` - -Grouping by multiple criteria ------------------------------ - -To group by multiple criteria, we'll use multiple arguments in the *COLLECT* clause. -For example, to group users by *ageGroup* (a derived value we need to calculate first) -and then by *gender*, we'll do: - -```js -FOR u IN users - FILTER u.active == true - COLLECT ageGroup = FLOOR(u.age / 5) * 5, - gender = u.gender INTO group - SORT ageGroup DESC - RETURN { - ageGroup, - gender - } -``` - -```json -[ - { "ageGroup": 35, "gender": "f" }, - { "ageGroup": 35, "gender": "m" }, - { "ageGroup": 30, "gender": "f" }, - { "ageGroup": 30, "gender": "m" }, - { "ageGroup": 25, "gender": "f" }, - { "ageGroup": 25, "gender": "m" } -] -``` - -Counting group values ---------------------- - -If the goal is to count the number of values in each group, AQL provides the special -*COLLECT WITH COUNT INTO* syntax. This is a simple variant for grouping with an additional -group length calculation: - -```js -FOR u IN users - FILTER u.active == true - COLLECT ageGroup = FLOOR(u.age / 5) * 5, - gender = u.gender WITH COUNT INTO numUsers - SORT ageGroup DESC - RETURN { - ageGroup, - gender, - numUsers - } -``` - -```json -[ - { "ageGroup": 35, "gender": "f", "numUsers": 2 }, - { "ageGroup": 35, "gender": "m", "numUsers": 2 }, - { "ageGroup": 30, "gender": "f", "numUsers": 4 }, - { "ageGroup": 30, "gender": "m", "numUsers": 4 }, - { "ageGroup": 25, "gender": "f", "numUsers": 2 }, - { "ageGroup": 25, "gender": "m", "numUsers": 2 } -] -``` - -Aggregation ------------ - -Adding further aggregation is also simple in AQL by using an *AGGREGATE* clause -in the *COLLECT*: - -```js -FOR u IN users - FILTER u.active == true - COLLECT ageGroup = FLOOR(u.age / 5) * 5, - gender = u.gender - AGGREGATE numUsers = LENGTH(1), - minAge = MIN(u.age), - maxAge = MAX(u.age) - SORT ageGroup DESC - RETURN { - ageGroup, - gender, - numUsers, - minAge, - maxAge - } -``` - -```json -[ - { - "ageGroup": 35, - "gender": "f", - "numUsers": 2, - "minAge": 36, - "maxAge": 39, - }, - { - "ageGroup": 35, - "gender": "m", - "numUsers": 2, - "minAge": 35, - "maxAge": 39, - }, - ... -] -``` - -We have used the aggregate functions *LENGTH* here (it returns the length of an array). -This is the equivalent to SQL's `SELECT g, COUNT(*) FROM ... GROUP BY g`. In addition to -*LENGTH*, AQL also provides *MAX*, *MIN*, *SUM* and *AVERAGE*, *VARIANCE_POPULATION*, -*VARIANCE_SAMPLE*, *STDDEV_POPULATION*, *STDDEV_SAMPLE*, *UNIQUE*, *SORTED_UNIQUE* and -*COUNT_UNIQUE* as basic aggregation functions. - -In AQL all aggregation functions can be run on arrays only. If an aggregation function -is run on anything that is not an array, a warning will be produced and the result will -be *null*. - -Using an *AGGREGATE* clause will ensure the aggregation is run while the groups are built -in the collect operation. This is normally more efficient than collecting all group values -for all groups and then doing a post-aggregation. 
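*AGGREGATE* can also be used without any group criteria to aggregate over all matching documents at once. A minimal sketch on the same *users* collection:

```js
FOR u IN users
  FILTER u.active == true
  COLLECT AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age)
  RETURN { minAge, maxAge }
```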
- - -Post-aggregation ----------------- - -Aggregation can also be performed after a *COLLECT* operation using other AQL constructs, -though performance-wise this is often inferior to using *COLLECT* with *AGGREGATE*. - -The same query as before can be turned into a post-aggregation query as shown below. Note -that this query will build and pass on all group values for all groups inside the variable -*g*, and perform the aggregation at the latest possible stage: - -```js -FOR u IN users - FILTER u.active == true - COLLECT ageGroup = FLOOR(u.age / 5) * 5, - gender = u.gender INTO g - SORT ageGroup DESC - RETURN { - ageGroup, - gender, - numUsers: LENGTH(g[*]), - minAge: MIN(g[*].u.age), - maxAge: MAX(g[*].u.age) - } -``` - -```json -[ - { - "ageGroup": 35, - "gender": "f", - "numUsers": 2, - "minAge": 36, - "maxAge": 39, - }, - { - "ageGroup": 35, - "gender": "m", - "numUsers": 2, - "minAge": 35, - "maxAge": 39, - }, - ... -] -``` - -This is in constrast to the previous query that used an *AGGREGATE* clause to perform -the aggregation during the collect operation, at the earliest possible stage. - - -Post-filtering aggregated data ------------------------------- - -To filter the results of a grouping or aggregation operation (i.e. something -similar to *HAVING* in SQL), simply add another *FILTER* clause after the *COLLECT* -statement. - -For example, to get the 3 *ageGroup*s with the most users in them: - -```js -FOR u IN users - FILTER u.active == true - COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO group - LET numUsers = LENGTH(group) - FILTER numUsers > 2 /* group must contain at least 3 users in order to qualify */ - SORT numUsers DESC - LIMIT 0, 3 - RETURN { - "ageGroup": ageGroup, - "numUsers": numUsers, - "users": group[*].u.name - } -``` - -```json -[ - { - "ageGroup": 30, - "numUsers": 8, - "users": [ - "Abigail", - "Madison", - "Anthony", - "Alexander", - "Isabella", - "Chloe", - "Daniel", - "Michael" - ] - }, - { - "ageGroup": 25, - "numUsers": 4, - "users": [ - "Mary", - "Mariah", - "Jim", - "Diego" - ] - }, - { - "ageGroup": 35, - "numUsers": 4, - "users": [ - "Fred", - "John", - "Emma", - "Sophia" - ] - } -] -``` - -To increase readability, the repeated expression *LENGTH(group)* was put into a variable -*numUsers*. The *FILTER* on *numUsers* is the equivalent an SQL *HAVING* clause. diff --git a/Documentation/Books/AQL/Examples/Join.md b/Documentation/Books/AQL/Examples/Join.md deleted file mode 100644 index 20dc8b9c3713..000000000000 --- a/Documentation/Books/AQL/Examples/Join.md +++ /dev/null @@ -1,246 +0,0 @@ -Joins -===== - -So far we have only dealt with one collection (*users*) at a time. We also have a -collection *relations* that stores relationships between users. We will now use -this extra collection to create a result from two collections. - -First of all, we'll query a few users together with their friends' ids. For that, -we'll use all *relations* that have a value of *friend* in their *type* attribute. -Relationships are established by using the *friendOf* and *thisUser* attributes in the -*relations* collection, which point to the *userId* values in the *users* collection. - -Join tuples ------------ - -We'll start with a SQL-ish result set and return each tuple (user name, friends userId) -separately. 
The AQL query to generate such result is: - - - @startDocuBlockInline joinTuples - @EXAMPLE_AQL{joinTuples} - @DATASET{joinSampleDataset} - FOR u IN users - FILTER u.active == true - LIMIT 0, 4 - FOR f IN relations - FILTER f.type == @friend && f.friendOf == u.userId - RETURN { - "user" : u.name, - "friendId" : f.thisUser - } - @BV { - friend: "friend" - } - @END_EXAMPLE_AQL - @endDocuBlock joinTuples - -We iterate over the collection users. Only the 'active' users will be examined. -For each of these users we will search for up to 4 friends. We locate friends -by comparing the *userId* of our current user with the *friendOf* attribute of the -*relations* document. For each of those relations found we return the users name -and the userId of the friend. - - -Horizontal lists ----------------- - - -Note that in the above result, a user can be returned multiple times. This is the -SQL way of returning data. If this is not desired, the friends' ids of each user -can be returned in a horizontal list. This will return each user at most once. - -The AQL query for doing so is: - -```js -FOR u IN users - FILTER u.active == true LIMIT 0, 4 - RETURN { - "user" : u.name, - "friendIds" : ( - FOR f IN relations - FILTER f.friendOf == u.userId && f.type == "friend" - RETURN f.thisUser - ) - } -``` - -```json -[ - { - "user" : "Abigail", - "friendIds" : [ - 108, - 102, - 106 - ] - }, - { - "user" : "Fred", - "friendIds" : [ - 209 - ] - }, - { - "user" : "Mary", - "friendIds" : [ - 207, - 104 - ] - }, - { - "user" : "Mariah", - "friendIds" : [ - 203, - 205 - ] - } -] -``` - -In this query we are still iterating over the users in the *users* collection -and for each matching user we are executing a subquery to create the matching -list of related users. - -Self joins ----------- - -To not only return friend ids but also the names of friends, we could "join" the -*users* collection once more (something like a "self join"): - -```js -FOR u IN users - FILTER u.active == true - LIMIT 0, 4 - RETURN { - "user" : u.name, - "friendIds" : ( - FOR f IN relations - FILTER f.friendOf == u.userId && f.type == "friend" - FOR u2 IN users - FILTER f.thisUser == u2.useId - RETURN u2.name - ) - } -``` - -```json -[ - { - "user" : "Abigail", - "friendIds" : [ - "Jim", - "Jacob", - "Daniel" - ] - }, - { - "user" : "Fred", - "friendIds" : [ - "Mariah" - ] - }, - { - "user" : "Mary", - "friendIds" : [ - "Isabella", - "Michael" - ] - }, - { - "user" : "Mariah", - "friendIds" : [ - "Madison", - "Eva" - ] - } -] -``` - -This query will then again in term fetch the clear text name of the -friend from the users collection. So here we iterate the users collection, -and for each hit the relations collection, and for each hit once more the -users collection. - -Outer joins ------------ - -Lets find the lonely people in our database - those without friends. - -```js - -FOR user IN users - LET friendList = ( - FOR f IN relations - FILTER f.friendOf == u.userId - RETURN 1 - ) - FILTER LENGTH(friendList) == 0 - RETURN { "user" : user.name } -``` - -```json -[ - { - "user" : "Abigail" - }, - { - "user" : "Fred" - } -] -``` - -So, for each user we pick the list of their friends and count them. The ones where -count equals zero are the lonely people. Using *RETURN 1* in the subquery -saves even more precious CPU cycles and gives the optimizer more alternatives. 
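The subquery can also be placed directly inside the *FILTER*, which avoids the extra variable. A sketch equivalent to the query above, referring to the loop variable consistently as *user*:

```js
FOR user IN users
  FILTER LENGTH(
    FOR f IN relations
      FILTER f.friendOf == user.userId
      RETURN 1
  ) == 0
  RETURN { "user" : user.name }
```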
- -Index usage ------------ - -Especially on joins you should [make sure indices can be used to speed up your query.](../ExecutionAndPerformance/ExplainingQueries.md) -Please note that sparse indices don't qualify for joins: - -In joins you typically would also want to join documents not containing the property -you join with. However sparse indices don't contain references to documents that -don't contain the indexed attributes - thus they would be missing from the join operation. -For that reason you should provide non-sparse indices. - -Pitfalls --------- - -Since we're free of schemata, there is by default no way to tell the format of the -documents. So, if your documents don't contain an attribute, it defaults to -null. We can however check our data for accuracy like this: - -```js -RETURN LENGTH(FOR u IN users FILTER u.userId == null RETURN 1) -``` - -```json -[ - 10000 -] -``` - -```js -RETURN LENGTH(FOR f IN relations FILTER f.friendOf == null RETURN 1) -``` - -```json -[ - 10000 -] -``` - -So if the above queries return 10k matches each, the result of the Join tuples -query will become 100,000,000 items larger and use much memory plus computation -time. So it is generally a good idea to revalidate that the criteria for your -join conditions exist. - -Using indices on the properties can speed up the operation significantly. -You can use the explain helper to revalidate your query actually uses them. - -If you work with joins on edge collections you would typically aggregate over -the internal fields *_id*, *_from* and *_to* (where *_id* equals *userId*, -*_from* *friendOf* and *_to* would be *thisUser* in our examples). ArangoDB -implicitly creates indices on them. diff --git a/Documentation/Books/AQL/Examples/MultiplePaths.md b/Documentation/Books/AQL/Examples/MultiplePaths.md deleted file mode 100644 index 18cee4dfcf55..000000000000 --- a/Documentation/Books/AQL/Examples/MultiplePaths.md +++ /dev/null @@ -1,43 +0,0 @@ -Multiple Path Search -==================== - -The shortest path algorithm can only determine one shortest path. -For example, if this is the full graph (based on the [mps_graph](../../Manual/Graphs/index.html#the-mps-graph)): - -![Example Graph](../../Manual/Graphs/mps_graph.png) - -then a shortest path query from **A** to **C** may return the path `A -> B -> C` or `A -> D -> C`, but it's undefined which one (not taking edge weights into account here). - -You can use the efficient shortest path algorithm however, to determine the shortest path length: - - - @startDocuBlockInline GRAPHTRAV_multiplePathSearch - @EXAMPLE_AQL{GRAPHTRAV_multiplePathSearch} - @DATASET{mps_graph} - RETURN LENGTH( - FOR v IN OUTBOUND - SHORTEST_PATH "mps_verts/A" TO "mps_verts/C" mps_edges - RETURN v - ) - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_multiplePathSearch - - -The result is 3 for the example graph (includes the start vertex). Now, subtract 1 to get the edge count / traversal depth. You can run a pattern matching traversal to find all paths with this length (or longer ones by increasing the min and max depth). Starting point is **A** again, and a filter on the document ID of v (or p.vertices[-1]) ensures that we only retrieve paths that end at point **C**. 
- -The following query returns all parts with length 2, start vertex **A** and target vertex **C**: - - - @startDocuBlockInline GRAPHTRAV_multiplePathSearch2 - @EXAMPLE_AQL{GRAPHTRAV_multiplePathSearch2} - @DATASET{mps_graph} - FOR v, e, p IN 2..2 OUTBOUND "mps_verts/A" mps_edges - FILTER v._id == "mps_verts/C" - RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*]._key) - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_multiplePathSearch2 - - -A traversal depth of `3..3` would return `A -> E -> F -> C` and `2..3` all three paths. - -Note that two separate queries are required to compute the shortest path length and to do the pattern matching based on the shortest path length (minus 1), because min and max depth can't be expressions (they have to be known in advance, so either be number literals or bind parameters). diff --git a/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md b/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md deleted file mode 100644 index b6bf2445505e..000000000000 --- a/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md +++ /dev/null @@ -1,134 +0,0 @@ -Projections and Filters -======================= - -Returning unaltered documents ------------------------------ - -To return three complete documents from collection *users*, the following query can be used: - -```js -FOR u IN users - LIMIT 0, 3 - RETURN u -``` - -```json -[ - { - "_id" : "users/229886047207520", - "_rev" : "229886047207520", - "_key" : "229886047207520", - "active" : true, - "id" : 206, - "age" : 31, - "gender" : "f", - "name" : "Abigail" - }, - { - "_id" : "users/229886045175904", - "_rev" : "229886045175904", - "_key" : "229886045175904", - "active" : true, - "id" : 101, - "age" : 36, - "name" : "Fred", - "gender" : "m" - }, - { - "_id" : "users/229886047469664", - "_rev" : "229886047469664", - "_key" : "229886047469664", - "active" : true, - "id" : 208, - "age" : 29, - "name" : "Mary", - "gender" : "f" - } -] -``` - -Note that there is a *LIMIT* clause but no *SORT* clause. In this case it is not guaranteed -which of the user documents are returned. Effectively the document return order is unspecified -if no *SORT* clause is used, and you should not rely on the order in such queries. - -Projections ------------ - -To return a projection from the collection *users* use a modified *RETURN* instruction: - -```js -FOR u IN users - LIMIT 0, 3 - RETURN { - "user" : { - "isActive" : u.active ? "yes" : "no", - "name" : u.name - } - } -``` - -```json -[ - { - "user" : { - "isActive" : "yes", - "name" : "John" - } - }, - { - "user" : { - "isActive" : "yes", - "name" : "Anthony" - } - }, - { - "user" : { - "isActive" : "yes", - "name" : "Fred" - } - } -] -``` - -Filters -------- - -To return a filtered projection from collection *users*, you can use the -*FILTER* keyword. 
Additionally, a *SORT* clause is used to have the result -returned in a specific order: - -```js -FOR u IN users - FILTER u.active == true && u.age >= 30 - SORT u.age DESC - LIMIT 0, 5 - RETURN { - "age" : u.age, - "name" : u.name - } -``` - -```json -[ - { - "age" : 37, - "name" : "Sophia" - }, - { - "age" : 37, - "name" : "John" - }, - { - "age" : 36, - "name" : "Emma" - }, - { - "age" : 36, - "name" : "Fred" - }, - { - "age" : 34, - "name" : "Madison" - } -] -``` diff --git a/Documentation/Books/AQL/Examples/QueriesNoCollections.md b/Documentation/Books/AQL/Examples/QueriesNoCollections.md deleted file mode 100644 index 02e72fcea2eb..000000000000 --- a/Documentation/Books/AQL/Examples/QueriesNoCollections.md +++ /dev/null @@ -1,40 +0,0 @@ -Queries without collections -=========================== - - -Following is a query that returns a string value. The result string is contained in an array -because the result of every valid query is an array: - -```js -RETURN "this will be returned" -[ - "this will be returned" -] -``` - -Here is a query that creates the cross products of two arrays and runs a projection -on it, using a few of AQL's built-in functions: - -```js -FOR year in [ 2011, 2012, 2013 ] - FOR quarter IN [ 1, 2, 3, 4 ] - RETURN { - "y" : "year", - "q" : quarter, - "nice" : CONCAT(quarter, "/", year) - } -[ - { "y" : "year", "q" : 1, "nice" : "1/2011" }, - { "y" : "year", "q" : 2, "nice" : "2/2011" }, - { "y" : "year", "q" : 3, "nice" : "3/2011" }, - { "y" : "year", "q" : 4, "nice" : "4/2011" }, - { "y" : "year", "q" : 1, "nice" : "1/2012" }, - { "y" : "year", "q" : 2, "nice" : "2/2012" }, - { "y" : "year", "q" : 3, "nice" : "3/2012" }, - { "y" : "year", "q" : 4, "nice" : "4/2012" }, - { "y" : "year", "q" : 1, "nice" : "1/2013" }, - { "y" : "year", "q" : 2, "nice" : "2/2013" }, - { "y" : "year", "q" : 3, "nice" : "3/2013" }, - { "y" : "year", "q" : 4, "nice" : "4/2013" } -] -``` diff --git a/Documentation/Books/AQL/Examples/README.md b/Documentation/Books/AQL/Examples/README.md deleted file mode 100644 index 771070ebfe62..000000000000 --- a/Documentation/Books/AQL/Examples/README.md +++ /dev/null @@ -1,113 +0,0 @@ -Usual Query Patterns Examples -============================= - -These pages contain some common query patterns with examples. For better -understandability the query results are also included directly below each query. - -Normally, you would want to run queries on data stored in collections. This section -will provide several examples for that. - -Some of the following example queries are executed on a collection 'users' with the data provided here below. - - -Things to consider when running queries on collections ------------------------------------------------------- - -Note that all documents created in any collections will automatically get the -following server-generated attributes: - -- *_id*: A unique id, consisting of [collection name](../../Manual/Appendix/Glossary.html#collection-name) - and a server-side sequence value -- *_key*: The server sequence value -- *_rev*: The document's revision id - -Whenever you run queries on the documents in collections, don't be surprised if -these additional attributes are returned as well. - -Please also note that with real-world data, you might want to create additional -indexes on the data (left out here for brevity). Adding indexes on attributes that are -used in *FILTER* statements may considerably speed up queries. 
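For instance, an index on the *id* attribute of the *users* collection could be created like this (a sketch; whether a hash, skiplist or persistent index fits best depends on the actual queries):

```js
db.users.ensureIndex({ type: "hash", fields: [ "id" ] });
```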
Furthermore, instead of -using attributes such as *id*, *from* and *to*, you might want to use the built-in -*_id*, *_from* and *_to* attributes. Finally, [edge collection](../../Manual/Appendix/Glossary.html#edge-collection)s provide a nice way of -establishing references / links between documents. These features have been left out here -for brevity as well. - - -Example data ------------- - -Some of the following example queries are executed on a collection *users* -with the following initial data: - -```json -[ - { "id": 100, "name": "John", "age": 37, "active": true, "gender": "m" }, - { "id": 101, "name": "Fred", "age": 36, "active": true, "gender": "m" }, - { "id": 102, "name": "Jacob", "age": 35, "active": false, "gender": "m" }, - { "id": 103, "name": "Ethan", "age": 34, "active": false, "gender": "m" }, - { "id": 104, "name": "Michael", "age": 33, "active": true, "gender": "m" }, - { "id": 105, "name": "Alexander", "age": 32, "active": true, "gender": "m" }, - { "id": 106, "name": "Daniel", "age": 31, "active": true, "gender": "m" }, - { "id": 107, "name": "Anthony", "age": 30, "active": true, "gender": "m" }, - { "id": 108, "name": "Jim", "age": 29, "active": true, "gender": "m" }, - { "id": 109, "name": "Diego", "age": 28, "active": true, "gender": "m" }, - { "id": 200, "name": "Sophia", "age": 37, "active": true, "gender": "f" }, - { "id": 201, "name": "Emma", "age": 36, "active": true, "gender": "f" }, - { "id": 202, "name": "Olivia", "age": 35, "active": false, "gender": "f" }, - { "id": 203, "name": "Madison", "age": 34, "active": true, "gender": "f" }, - { "id": 204, "name": "Chloe", "age": 33, "active": true, "gender": "f" }, - { "id": 205, "name": "Eva", "age": 32, "active": false, "gender": "f" }, - { "id": 206, "name": "Abigail", "age": 31, "active": true, "gender": "f" }, - { "id": 207, "name": "Isabella", "age": 30, "active": true, "gender": "f" }, - { "id": 208, "name": "Mary", "age": 29, "active": true, "gender": "f" }, - { "id": 209, "name": "Mariah", "age": 28, "active": true, "gender": "f" } -] -``` - -For some of the examples, we'll also use a collection *relations* to store -relationships between users. 
The example data for *relations* are as follows: - -```json -[ - { "from": 209, "to": 205, "type": "friend" }, - { "from": 206, "to": 108, "type": "friend" }, - { "from": 202, "to": 204, "type": "friend" }, - { "from": 200, "to": 100, "type": "friend" }, - { "from": 205, "to": 101, "type": "friend" }, - { "from": 209, "to": 203, "type": "friend" }, - { "from": 200, "to": 203, "type": "friend" }, - { "from": 100, "to": 208, "type": "friend" }, - { "from": 101, "to": 209, "type": "friend" }, - { "from": 206, "to": 102, "type": "friend" }, - { "from": 104, "to": 100, "type": "friend" }, - { "from": 104, "to": 108, "type": "friend" }, - { "from": 108, "to": 209, "type": "friend" }, - { "from": 206, "to": 106, "type": "friend" }, - { "from": 204, "to": 105, "type": "friend" }, - { "from": 208, "to": 207, "type": "friend" }, - { "from": 102, "to": 108, "type": "friend" }, - { "from": 207, "to": 203, "type": "friend" }, - { "from": 203, "to": 106, "type": "friend" }, - { "from": 202, "to": 108, "type": "friend" }, - { "from": 201, "to": 203, "type": "friend" }, - { "from": 105, "to": 100, "type": "friend" }, - { "from": 100, "to": 109, "type": "friend" }, - { "from": 207, "to": 109, "type": "friend" }, - { "from": 103, "to": 203, "type": "friend" }, - { "from": 208, "to": 104, "type": "friend" }, - { "from": 105, "to": 104, "type": "friend" }, - { "from": 103, "to": 208, "type": "friend" }, - { "from": 203, "to": 107, "type": "boyfriend" }, - { "from": 107, "to": 203, "type": "girlfriend" }, - { "from": 208, "to": 109, "type": "boyfriend" }, - { "from": 109, "to": 208, "type": "girlfriend" }, - { "from": 106, "to": 205, "type": "girlfriend" }, - { "from": 205, "to": 106, "type": "boyfriend" }, - { "from": 103, "to": 209, "type": "girlfriend" }, - { "from": 209, "to": 103, "type": "boyfriend" }, - { "from": 201, "to": 102, "type": "boyfriend" }, - { "from": 102, "to": 201, "type": "girlfriend" }, - { "from": 206, "to": 100, "type": "boyfriend" }, - { "from": 100, "to": 206, "type": "girlfriend" } -] -``` diff --git a/Documentation/Books/AQL/Examples/RemoveVertex.md b/Documentation/Books/AQL/Examples/RemoveVertex.md deleted file mode 100644 index ea9cc8a197f7..000000000000 --- a/Documentation/Books/AQL/Examples/RemoveVertex.md +++ /dev/null @@ -1,70 +0,0 @@ -Remove Vertex -============= - -Deleting vertices with associated edges is currently not handled via AQL while -the [graph management interface](../../Manual/Graphs/GeneralGraphs/Management.html#remove-a-vertex) -and the -[REST API for the graph module](../../HTTP/Gharial/Vertices.html#remove-a-vertex) -offer a vertex deletion functionality. -However, as shown in this example based on the -[knows_graph](../../Manual/Graphs/index.html#the-knowsgraph), a query for this -use case can be created. - -![Example Graph](../../Manual/Graphs/knows_graph.png) - -When deleting vertex **eve** from the graph, we also want the edges -`eve -> alice` and `eve -> bob` to be removed. -The involved graph and its only edge collection has to be known. In this case it -is the graph **knows_graph** and the edge collection **knows**. 
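To double-check which edges would be affected before removing anything, a read-only traversal can list them first (a sketch; it assumes the *knows_graph* example dataset mentioned above is present):

```js
FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph'
  RETURN e._key
```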
- -This query will delete **eve** with its adjacent edges: - - @startDocuBlockInline GRAPHTRAV_removeVertex1 - @EXAMPLE_AQL{GRAPHTRAV_removeVertex1} - @DATASET{knows_graph} -LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph' RETURN e._key) -LET r = (FOR key IN edgeKeys REMOVE key IN knows) -REMOVE 'eve' IN persons - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_removeVertex1 - -This query executed several actions: -* use a graph traversal of depth 1 to get the `_key` of **eve's** adjacent edges -* remove all of these edges from the `knows` collection -* remove vertex **eve** from the `persons` collection - -The following query shows a different design to achieve the same result: - - @startDocuBlockInline GRAPHTRAV_removeVertex2 - @EXAMPLE_AQL{GRAPHTRAV_removeVertex2} - @DATASET{knows_graph} -LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph' - REMOVE e._key IN knows) -REMOVE 'eve' IN persons - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_removeVertex2 - -**Note**: The query has to be adjusted to match a graph with multiple vertex/edge collections. - -For example, the [city graph](../../Manual/Graphs/index.html#the-city-graph) -contains several vertex collections - `germanCity` and `frenchCity` and several -edge collections - `french / german / international Highway`. - -![Example Graph2](../../Manual/Graphs/cities_graph.png) - -To delete city **Berlin** all edge collections `french / german / international Highway` -have to be considered. The **REMOVE** operation has to be applied on all edge -collections with `OPTIONS { ignoreErrors: true }`. Not using this option will stop the query -whenever a non existing key should be removed in a collection. - - @startDocuBlockInline GRAPHTRAV_removeVertex3 - @EXAMPLE_AQL{GRAPHTRAV_removeVertex3} - @DATASET{routeplanner} -LET edgeKeys = (FOR v, e IN 1..1 ANY 'germanCity/Berlin' GRAPH 'routeplanner' RETURN e._key) -LET r = (FOR key IN edgeKeys REMOVE key IN internationalHighway - OPTIONS { ignoreErrors: true } REMOVE key IN germanHighway - OPTIONS { ignoreErrors: true } REMOVE key IN frenchHighway - OPTIONS { ignoreErrors: true }) -REMOVE 'Berlin' IN germanCity - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_removeVertex3 diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md b/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md deleted file mode 100644 index 296a0a9292a6..000000000000 --- a/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md +++ /dev/null @@ -1,221 +0,0 @@ -Explaining queries -================== - -If it is unclear how a given query will perform, clients can retrieve a query's execution plan -from the AQL query optimizer without actually executing the query. Getting the query execution -plan from the optimizer is called *explaining*. - -An explain will throw an error if the given query is syntactically invalid. Otherwise, it will -return the execution plan and some information about what optimizations could be applied to -the query. The query will not be executed. - -Explaining a query can be achieved by calling the [HTTP REST API](../../HTTP/AqlQuery/index.html) -or via _arangosh_. -A query can also be explained from the ArangoShell using the `ArangoDatabase`'s `explain` method -or in detail via `ArangoStatement`'s `explain` method. - - -Inspecting query plans ----------------------- - -The `explain` method of `ArangoStatement` as shown in the next chapters creates very verbose output. 
-To get a human-readable output of the query plan you can use the `explain` method on our database -object in arangosh. You may use it like this: (we disable syntax highlighting here) - - @startDocuBlockInline 01_workWithAQL_databaseExplain - @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_databaseExplain} - db._explain("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 01_workWithAQL_databaseExplain - -The plan contains all execution nodes that are used during a query. These nodes represent different -stages in a query. Each stage gets the input from the stage directly above (its dependencies). -The plan will show you the estimated number of items (results) for each query stage (under _Est._). Each -query stage roughly equates to a line in your original query, which you can see under _Comment_. - - -Profiling queries ------------------ - -Sometimes when you have a complex query it can be unclear on what time is spent -during the execution, even for intermediate ArangoDB users. - -By profiling a query it gets executed with special instrumentation code enabled. -It gives you all the usual information like when explaining a query, but -additionally you get the query profile, [runtime statistics](QueryStatistics.md) -and per-node statistics. - -To use this in an interactive fashion on the shell you can use the -`_profileQuery()` method on the `ArangoDatabase` object or use the web interface. - -For more information see [Profiling Queries](QueryProfiler.md). - - @startDocuBlockInline 01_workWithAQL_databaseProfileQuery - @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_databaseProfileQuery} - db._profileQuery("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 01_workWithAQL_databaseProfileQuery - - -Execution plans in detail -------------------------- - -By default, the query optimizer will return what it considers to be the *optimal plan*. The -optimal plan will be returned in the `plan` attribute of the result. If `explain` is -called with option `allPlans` set to `true`, all plans will be returned in the `plans` -attribute instead. The result object will also contain an attribute *warnings*, which -is an array of warnings that occurred during optimization or execution plan creation. - -Each plan in the result is an object with the following attributes: -- *nodes*: the array of execution nodes of the plan. [The list of available node types - can be found here](Optimizer.md) -- *estimatedCost*: the total estimated cost for the plan. If there are multiple - plans, the optimizer will choose the plan with the lowest total cost. -- *collections*: an array of collections used in the query -- *rules*: an array of rules the optimizer applied. 
[The list of rules can be - found here](Optimizer.md) -- *variables*: array of variables used in the query (note: this may contain - internal variables created by the optimizer) - -Here is an example for retrieving the execution plan of a simple query: - - @startDocuBlockInline 07_workWithAQL_statementsExplain - @EXAMPLE_ARANGOSH_OUTPUT{07_workWithAQL_statementsExplain} - |var stmt = db._createStatement( - "FOR user IN _users RETURN user"); - stmt.explain(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 07_workWithAQL_statementsExplain - -As the output of `explain` is very detailed, it is recommended to use some -scripting to make the output less verbose: - - @startDocuBlockInline 08_workWithAQL_statementsPlans - @EXAMPLE_ARANGOSH_OUTPUT{08_workWithAQL_statementsPlans} - |var formatPlan = function (plan) { - | return { estimatedCost: plan.estimatedCost, - | nodes: plan.nodes.map(function(node) { - return node.type; }) }; }; - formatPlan(stmt.explain().plan); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 08_workWithAQL_statementsPlans - -If a query contains bind parameters, they must be added to the statement **before** -`explain` is called: - - @startDocuBlockInline 09_workWithAQL_statementsPlansBind - @EXAMPLE_ARANGOSH_OUTPUT{09_workWithAQL_statementsPlansBind} - |var stmt = db._createStatement( - | `FOR doc IN @@collection FILTER doc.user == @user RETURN doc` - ); - stmt.bind({ "@collection" : "_users", "user" : "root" }); - stmt.explain(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 09_workWithAQL_statementsPlansBind - -In some cases the AQL optimizer creates multiple plans for a single query. By default -only the plan with the lowest total estimated cost is kept, and the other plans are -discarded. To retrieve all plans the optimizer has generated, `explain` can be called -with the option `allPlans` set to `true`. - -In the following example, the optimizer has created two plans: - - @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer0 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer0} - |var stmt = db._createStatement( - "FOR user IN _users FILTER user.user == 'root' RETURN user"); - stmt.explain({ allPlans: true }).plans.length; - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_statementsPlansOptimizer0 - -To see a slightly more compact version of the plan, the following transformation can be applied: - - @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer1 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer1} - ~var stmt = db._createStatement("FOR user IN _users FILTER user.user == 'root' RETURN user"); - |stmt.explain({ allPlans: true }).plans.map( - function(plan) { return formatPlan(plan); }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_statementsPlansOptimizer1 - -`explain` will also accept the following additional options: -- *maxPlans*: limits the maximum number of plans that are created by the AQL query optimizer -- *optimizer.rules*: an array of to-be-included or to-be-excluded optimizer rules - can be put into this attribute, telling the optimizer to include or exclude - specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it - with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules. 
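For instance, the *maxPlans* option could be used to restrict the optimizer to a single plan (a sketch reusing the `stmt` statement from the previous examples; the option name is taken from the list above):

```js
stmt.explain({ maxPlans: 1 });
```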
- -The following example disables all optimizer rules but `remove-redundant-calculations`: - - @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer2 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer2} - ~var stmt = db._createStatement("FOR user IN _users FILTER user.user == 'root' RETURN user"); - |stmt.explain({ optimizer: { - rules: [ "-all", "+remove-redundant-calculations" ] } }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_statementsPlansOptimizer2 - - -The contents of an execution plan are meant to be machine-readable. To get a human-readable -version of a query's execution plan, the following commands can be used: - - @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer3 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer3} - var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc"; - require("@arangodb/aql/explainer").explain(query, {colors:false}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_statementsPlansOptimizer3 - -The above command prints the query's execution plan in the ArangoShell directly, focusing -on the most important information. - - -Gathering debug information about a query ------------------------------------------ - -If an explain provides no suitable insight into why a query does not perform as -expected, it may be reported to the ArangoDB support. In order to make this as easy -as possible, there is a built-in command in ArangoShell for packaging the query, its -bind parameters and all data required to execute the query elsewhere. - -The command will store all data in a file with a configurable filename: - - @startDocuBlockInline 10_workWithAQL_debugging1 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging1} - var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc"; - require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_debugging1 - -Entitled users can send the generated file to the ArangoDB support to facilitate -reproduction and debugging. - -If a query contains bind parameters, they will need to specified along with the query -string: - - @startDocuBlockInline 10_workWithAQL_debugging2 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging2} - var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc"; - var bind = { value: 42, "@collection": "mycollection" }; - require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_debugging2 - -It is also possible to include example documents from the underlying collection in -order to make reproduction even easier. Example documents can be sent as they are, or -in an anonymized form. The number of example documents can be specified in the *examples* -options attribute, and should generally be kept low. The *anonymize* option will replace -the contents of string attributes in the examples with "XXX". It will however not -replace any other types of data (e.g. numeric values) or attribute names. 
Attribute -names in the examples will always be preserved because they may be indexed and used in -queries: - - @startDocuBlockInline 10_workWithAQL_debugging3 - @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging3} - var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc"; - var bind = { value: 42, "@collection": "mycollection" }; - var options = { examples: 10, anonymize: true }; - require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind, options); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 10_workWithAQL_debugging3 - diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md b/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md deleted file mode 100644 index 79394164a548..000000000000 --- a/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md +++ /dev/null @@ -1,533 +0,0 @@ -The AQL query optimizer -======================= - -AQL queries are sent through an optimizer before execution. The task of the optimizer is -to create an initial execution plan for the query, look for optimization opportunities and -apply them. As a result, the optimizer might produce multiple execution plans for a -single query. It will then calculate the costs for all plans and pick the plan with the -lowest total cost. This resulting plan is considered to be the *optimal plan*, which is -then executed. - -The optimizer is designed to only perform optimizations if they are *safe*, in the -meaning that an optimization should not modify the result of a query. A notable exception -to this is that the optimizer is allowed to change the order of results for queries that -do not explicitly specify how results should be sorted. - -Execution plans ---------------- - -The `explain` command can be used to query the optimal executed plan or even all plans -the optimizer has generated. Additionally, `explain` can reveal some more information -about the optimizer's view of the query. - -### Inspecting plans using the explain helper - -The `explain` method of `ArangoStatement` as shown in the next chapters creates very verbose output. -You can work on the output programmatically, or use this handsome tool that we created -to generate a more human readable representation. - -You may use it like this: (we disable syntax highlighting here) - - @startDocuBlockInline AQLEXP_01_axplainer - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_01_axplainer} - ~addIgnoreCollection("test") - ~db._drop("test"); - db._create("test"); - for (i = 0; i < 100; ++i) { db.test.save({ value: i }); } - db.test.ensureIndex({ type: "skiplist", fields: [ "value" ] }); - var explain = require("@arangodb/aql/explainer").explain; - explain("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value", {colors:false}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_01_axplainer - - -### Execution plans in detail - -Let's have a look at the raw json output of the same execution plan -using the `explain` method of `ArangoStatement`: - - @startDocuBlockInline AQLEXP_01_explainCreate - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_01_explainCreate} - stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_01_explainCreate - -As you can see, the result details are very verbose so we will not show them in full in the next -sections. Instead, let's take a closer look at the results step by step. 
- -#### Execution nodes - -In general, an execution plan can be considered to be a pipeline of processing steps. -Each processing step is carried out by a so-called *execution node* - -The `nodes` attribute of the `explain` result contains these *execution nodes* in -the *execution plan*. The output is still very verbose, so here's a shorted form of it: - - @startDocuBlockInline AQLEXP_02_explainOverview - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_02_explainOverview} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain().plan.nodes.map(function (node) { return node.type; }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_02_explainOverview - -*Note that the list of nodes might slightly change in future versions of ArangoDB if -new execution node types get added or the optimizer create somewhat more -optimized plans).* - -When a plan is executed, the query execution engine will start with the node at -the bottom of the list (i.e. the *ReturnNode*). - -The *ReturnNode*'s purpose is to return data to the caller. It does not produce -data itself, so it will ask the node above itself, this is the *CalculationNode* -in our example. -*CalculationNode*s are responsible for evaluating arbitrary expressions. In our -example query, the *CalculationNode* will evaluate the value of `i.value`, which -is needed by the *ReturnNode*. The calculation will be applied for all data the -*CalculationNode* gets from the node above it, in our example the *IndexNode*. - -Finally, all of this needs to be done for documents of collection `test`. This is -where the *IndexNode* enters the game. It will use an index (thus its name) -to find certain documents in the collection and ship it down the pipeline in the -order required by `SORT i.value`. The *IndexNode* itself has a *SingletonNode* -as its input. The sole purpose of a *SingletonNode* node is to provide a single empty -document as input for other processing steps. It is always the end of the pipeline. - -Here's a summary: -* SingletonNode: produces an empty document as input for other processing steps. -* IndexNode: iterates over the index on attribute `value` in collection `test` - in the order required by `SORT i.value`. -* CalculationNode: evaluates the result of the calculation `i.value > 97` to `true` or `false` -* CalculationNode: calculates return value `i.value` -* ReturnNode: returns data to the caller - - -#### Optimizer rules - -Note that in the example, the optimizer has optimized the `SORT` statement away. -It can do it safely because there is a sorted skiplist index on `i.value`, which it has -picked in the *IndexNode*. As the index values are iterated over in sorted order -anyway, the extra *SortNode* would have been redundant and was removed. - -Additionally, the optimizer has done more work to generate an execution plan that -avoids as much expensive operations as possible. 
Here is the list of optimizer rules -that were applied to the plan: - - @startDocuBlockInline AQLEXP_03_explainRules - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_03_explainRules} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain().plan.rules; - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_03_explainRules - -Here is the meaning of these rules in context of this query: -* `move-calculations-up`: moves a *CalculationNode* as far up in the processing pipeline - as possible -* `move-filters-up`: moves a *FilterNode* as far up in the processing pipeline as - possible -* `remove-redundant-calculations`: replaces references to variables with references to - other variables that contain the exact same result. In the example query, `i.value` - is calculated multiple times, but each calculation inside a loop iteration would - produce the same value. Therefore, the expression result is shared by several nodes. -* `remove-unnecessary-calculations`: removes *CalculationNode*s whose result values are - not used in the query. In the example this happens due to the `remove-redundant-calculations` - rule having made some calculations unnecessary. -* `use-indexes`: use an index to iterate over a collection instead of performing a - full collection scan. In the example case this makes sense, as the index can be - used for filtering and sorting. -* `remove-filter-covered-by-index`: remove an unnecessary filter whose functionality - is already covered by an index. In this case the index only returns documents - matching the filter. -* `use-index-for-sort`: removes a `SORT` operation if it is already satisfied by - traversing over a sorted index - -Note that some rules may appear multiple times in the list, with number suffixes. -This is due to the same rule being applied multiple times, at different positions -in the optimizer pipeline. - - -#### Collections used in a query - -The list of collections used in a plan (and query) is contained in the `collections` -attribute of a plan: - - @startDocuBlockInline AQLEXP_04_explainCollections - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_04_explainCollections} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain().plan.collections - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_04_explainCollections - -The `name` attribute contains the name of the `collection`, and `type` is the -access type, which can be either `read` or `write`. - - -#### Variables used in a query - -The optimizer will also return a list of variables used in a plan (and query). This -list will contain auxiliary variables created by the optimizer itself. This list -can be ignored by end users in most cases. - - -#### Cost of a query - -For each plan the optimizer generates, it will calculate the total cost. The plan -with the lowest total cost is considered to be the optimal plan. Costs are -estimates only, as the actual execution costs are unknown to the optimizer. -Costs are calculated based on heuristics that are hard-coded into execution nodes. -Cost values do not have any unit. 
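To look at just that estimate for the optimal plan, it can be read directly from the plan object (a sketch, reusing the `stmt` statement from the earlier examples):

```js
stmt.explain().plan.estimatedCost;
```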
- - -### Retrieving all execution plans - -To retrieve not just the optimal plan but a list of all plans the optimizer has -generated, set the option `allPlans` to `true`: - -This will return a list of all plans in the `plans` attribute instead of in the -`plan` attribute: - - @startDocuBlockInline AQLEXP_05_explainAllPlans - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_05_explainAllPlans} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain({ allPlans: true }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_05_explainAllPlans - -### Retrieving the plan as it was generated by the parser / lexer - -To retrieve the plan which closely matches your query, you may turn off most -optimization rules (i.e. cluster rules cannot be disabled if you're running -the explain on a cluster coordinator) set the option `rules` to `-all`: - -This will return an unoptimized plan in the `plan`: - - @startDocuBlockInline AQLEXP_06_explainUnoptimizedPlans - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_06_explainUnoptimizedPlans} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain({ optimizer: { rules: [ "-all" ] } }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_06_explainUnoptimizedPlans - -Note that some optimizations are already done at parse time (i.e. evaluate simple constant -calculation as `1 + 1`) - - -Turning specific optimizer rules off ------------------------------------- - -Optimizer rules can also be turned on or off individually, using the `rules` attribute. -This can be used to enable or disable one or multiple rules. Rules that shall be enabled -need to be prefixed with a `+`, rules to be disabled should be prefixed with a `-`. The -pseudo-rule `all` matches all rules. - -Rules specified in `rules` are evaluated from left to right, so the following works to -turn on just the one specific rule: - - @startDocuBlockInline AQLEXP_07_explainSingleRulePlans - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_07_explainSingleRulePlans} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain({ optimizer: { rules: [ "-all", "+use-index-range" ] } }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_07_explainSingleRulePlans - -By default, all rules are turned on. To turn off just a few specific rules, use something -like this: - - @startDocuBlockInline AQLEXP_08_explainDisableSingleRulePlans - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_08_explainDisableSingleRulePlans} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain({ optimizer: { rules: [ "-use-index-range", "-use-index-for-sort" ] } }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_08_explainDisableSingleRulePlans - -The maximum number of plans created by the optimizer can also be limited using the -`maxNumberOfPlans` attribute: - - @startDocuBlockInline AQLEXP_09_explainMaxNumberOfPlans - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_09_explainMaxNumberOfPlans} - ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); - stmt.explain({ maxNumberOfPlans: 1 }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_09_explainMaxNumberOfPlans - -Optimizer statistics --------------------- - -The optimizer will return statistics as a part of an `explain` result. 
- -The following attributes will be returned in the `stats` attribute of an `explain` result: - -- `plansCreated`: total number of plans created by the optimizer -- `rulesExecuted`: number of rules executed (note: an executed rule does not - indicate a plan was actually modified by a rule) -- `rulesSkipped`: number of rules skipped by the optimizer - -Warnings --------- - -For some queries, the optimizer may produce warnings. These will be returned in -the `warnings` attribute of the `explain` result: - - @startDocuBlockInline AQLEXP_10_explainWarn - @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_10_explainWarn} - var stmt = db._createStatement("FOR i IN 1..10 RETURN 1 / 0") - stmt.explain().warnings; - ~db._drop("test") - ~removeIgnoreCollection("test") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock AQLEXP_10_explainWarn - -There is an upper bound on the number of warning a query may produce. If that -bound is reached, no further warnings will be returned. - - -Optimization in a cluster -------------------------- - -When you're running AQL in the cluster, the parsing of the query is done on the -coordinator. The coordinator then chops the query into snipets, which are to -remain on the coordinator, and others that are to be distributed over the network -to the shards. The cutting sites are interconnected via *Scatter-*, *Gather-* and *RemoteNodes*. - -These nodes mark the network borders of the snippets. The optimizer strives to reduce the amount -of data transfered via these network interfaces by pushing `FILTER`s out to the shards, -as it is vital to the query performance to reduce that data amount to transfer over the -network links. - -Snippets marked with **DBS** are executed on the shards, **COOR** ones are excuted on the coordinator. - -**As usual, the optimizer can only take certain assumptions for granted when doing so, -i.e. [user-defined functions have to be executed on the coordinator](../Extending/README.md). -If in doubt, you should modify your query to reduce the number interconnections between your snippets.** - -When optimizing your query you may want to look at simpler parts of it first. - -List of execution nodes ------------------------ - -The following execution node types will appear in the output of `explain`: - -* *SingletonNode*: the purpose of a *SingletonNode* is to produce an empty document - that is used as input for other processing steps. Each execution plan will contain - exactly one *SingletonNode* as its top node. -* *EnumerateCollectionNode*: enumeration over documents of a collection (given in - its *collection* attribute) without using an index. -* *IndexNode*: enumeration over one or many indexes (given in its *indexes* attribute) - of a collection. The index ranges are specified in the *condition* attribute of the node. -* *EnumerateListNode*: enumeration over a list of (non-collection) values. -* *FilterNode*: only lets values pass that satisfy a filter condition. Will appear once - per *FILTER* statement. -* *LimitNode*: limits the number of results passed to other processing steps. Will - appear once per *LIMIT* statement. -* *CalculationNode*: evaluates an expression. The expression result may be used by - other nodes, e.g. *FilterNode*, *EnumerateListNode*, *SortNode* etc. -* *SubqueryNode*: executes a subquery. -* *SortNode*: performs a sort of its input values. -* *AggregateNode*: aggregates its input and produces new output variables. This will - appear once per *COLLECT* statement. -* *ReturnNode*: returns data to the caller. 
Will appear in each read-only query at - least once. Subqueries will also contain *ReturnNode*s. -* *InsertNode*: inserts documents into a collection (given in its *collection* - attribute). Will appear exactly once in a query that contains an *INSERT* statement. -* *RemoveNode*: removes documents from a collection (given in its *collection* - attribute). Will appear exactly once in a query that contains a *REMOVE* statement. -* *ReplaceNode*: replaces documents in a collection (given in its *collection* - attribute). Will appear exactly once in a query that contains a *REPLACE* statement. -* *UpdateNode*: updates documents in a collection (given in its *collection* - attribute). Will appear exactly once in a query that contains an *UPDATE* statement. -* *UpsertNode*: upserts documents in a collection (given in its *collection* - attribute). Will appear exactly once in a query that contains an *UPSERT* statement. -* *NoResultsNode*: will be inserted if *FILTER* statements turn out to be never - satisfiable. The *NoResultsNode* will pass an empty result set into the processing - pipeline. - -For queries in the cluster, the following nodes may appear in execution plans: - -* *SingleRemoteOperationNode*: used on a coordinator to directly work with a single - document on a DB-Server that was referenced by its `_key`. -* *ScatterNode*: used on a coordinator to fan-out data to one or multiple shards. -* *GatherNode*: used on a coordinator to aggregate results from one or many shards - into a combined stream of results. -* *DistributeNode*: used on a coordinator to fan-out data to one or multiple shards, - taking into account a collection's shard key. -* *RemoteNode*: a *RemoteNode* will perform communication with another ArangoDB - instances in the cluster. For example, the cluster coordinator will need to - communicate with other servers to fetch the actual data from the shards. It - will do so via *RemoteNode*s. The data servers themselves might again pull - further data from the coordinator, and thus might also employ *RemoteNode*s. - So, all of the above cluster relevant nodes will be accompanied by a *RemoteNode*. - - -List of optimizer rules ------------------------ - -The following optimizer rules may appear in the `rules` attribute of a plan: - -* `move-calculations-up`: will appear if a *CalculationNode* was moved up in a plan. - The intention of this rule is to move calculations up in the processing pipeline - as far as possible (ideally out of enumerations) so they are not executed in loops - if not required. It is also quite common that this rule enables further optimizations - to kick in. -* `move-filters-up`: will appear if a *FilterNode* was moved up in a plan. The - intention of this rule is to move filters up in the processing pipeline as far - as possible (ideally out of inner loops) so they filter results as early as possible. -* `sort-in-values`: will appear when the values used as right-hand side of an `IN` - operator will be pre-sorted using an extra function call. Pre-sorting the comparison - array allows using a binary search in-list lookup with a logarithmic complexity instead - of the default linear complexity in-list lookup. -* `remove-unnecessary-filters`: will appear if a *FilterNode* was removed or replaced. - *FilterNode*s whose filter condition will always evaluate to *true* will be - removed from the plan, whereas *FilterNode* that will never let any results pass - will be replaced with a *NoResultsNode*. 
-* `remove-redundant-calculations`: will appear if redundant calculations (expressions - with the exact same result) were found in the query. The optimizer rule will then - replace references to the redundant expressions with a single reference, allowing - other optimizer rules to remove the then-unneeded *CalculationNode*s. -* `remove-unnecessary-calculations`: will appear if *CalculationNode*s were removed - from the query. The rule will removed all calculations whose result is not - referenced in the query (note that this may be a consequence of applying other - optimizations). -* `remove-redundant-sorts`: will appear if multiple *SORT* statements can be merged - into fewer sorts. -* `interchange-adjacent-enumerations`: will appear if a query contains multiple - *FOR* statements whose order were permuted. Permutation of *FOR* statements is - performed because it may enable further optimizations by other rules. -* `remove-collect-variables`: will appear if an *INTO* clause was removed from a *COLLECT* - statement because the result of *INTO* is not used. May also appear if a result - of a *COLLECT* statement's *AGGREGATE* variables is not used. -* `propagate-constant-attributes`: will appear when a constant value was inserted - into a filter condition, replacing a dynamic attribute value. -* `replace-or-with-in`: will appear if multiple *OR*-combined equality conditions - on the same variable or attribute were replaced with an *IN* condition. -* `remove-redundant-or`: will appear if multiple *OR* conditions for the same variable - or attribute were combined into a single condition. -* `use-indexes`: will appear when an index is used to iterate over a collection. - As a consequence, an *EnumerateCollectionNode* was replaced with an - *IndexNode* in the plan. -* `remove-filter-covered-by-index`: will appear if a *FilterNode* was removed or replaced - because the filter condition is already covered by an *IndexNode*. -* `remove-filter-covered-by-traversal`: will appear if a *FilterNode* was removed or replaced - because the filter condition is already covered by an *TraversalNode*. -* `use-index-for-sort`: will appear if an index can be used to avoid a *SORT* - operation. If the rule was applied, a *SortNode* was removed from the plan. -* `move-calculations-down`: will appear if a *CalculationNode* was moved down in a plan. - The intention of this rule is to move calculations down in the processing pipeline - as far as possible (below *FILTER*, *LIMIT* and *SUBQUERY* nodes) so they are executed - as late as possible and not before their results are required. -* `patch-update-statements`: will appear if an *UpdateNode* or *ReplaceNode* was patched - to not buffer its input completely, but to process it in smaller batches. The rule will - fire for an *UPDATE* or *REPLACE* query that is fed by a full collection scan or an index - scan only, and that does not use any other collections, indexes, subqueries or traversals. -* `optimize-traversals`: will appear if either the edge or path output variable in an - AQL traversal was optimized away, or if a *FILTER* condition from the query was moved - in the *TraversalNode* for early pruning of results. -* `inline-subqueries`: will appear when a subquery was pulled out in its surrounding scope, - e.g. `FOR x IN (FOR y IN collection FILTER y.value >= 5 RETURN y.test) RETURN x.a` - would become `FOR tmp IN collection FILTER tmp.value >= 5 LET x = tmp.test RETURN x.a` -* `geo-index-optimizer`: will appear when a geo index is utilized. 
-* `replace-function-with-index`: will appear when a deprecated index function such as - `FULLTEXT`, `NEAR`, `WITHIN` or `WITHIN_RECTANGLE` is replaced with a regular - subquery. -* `fuse-filters`: will appear if the optimizer merges adjacent FILTER nodes together into - a single FILTER node -* `simplify-conditions`: will appear if the optimizer replaces parts in a CalculationNode's - expression with simpler expressions -* `remove-sort-rand`: will appear when a *SORT RAND()* expression is removed by - moving the random iteration into an *EnumerateCollectionNode*. This optimizer rule - is specific for the MMFiles storage engine. -* `reduce-extraction-to-projection`: will appear when an *EnumerationCollectionNode* or - an *IndexNode* that would have extracted an entire document was modified to return - only a projection of each document. Projections are limited to at most 5 different - document attributes. This optimizer rule is specific for the RocksDB storage engine. -* `optimize-subqueries`: will appear when optimizations are applied to a subquery. The - optimizer rule will add a *LIMIT* statement to qualifying subqueries to make them - return less data. Another optimization performed by this rule is to modify the result - value of subqueries in case only the number of subquery results is checked later. - This saves copying the document data from the subquery to the outer scope and may - enable follow-up optimizations. -* `sort-limit`: will appear when a *SortNode* is followed by a *LimitNode* with no - intervening nodes that may change the element count (e.g. a *FilterNode* which - could not be moved before the sort, or a source node like *EnumerateCollectionNode*). - This is used to make the *SortNode* aware of the limit and offset from the *LimitNode* - to enable some optimizations internal to the *SortNode* which allow for reduced - memory usage and and in many cases, improved sorting speed. The optimizer may - choose not to apply the rule if it decides that it will offer little or no benefit. - In particular it will not apply the rule if the input size is very small or if - the output from the `LimitNode` is similar in size to the input. In exceptionally rare - cases, this rule could result in some small slowdown. If observed, one can - disable the rule for the affected query at the cost of increased memory usage. - -The following optimizer rules may appear in the `rules` attribute of cluster plans: - -* `optimize-cluster-single-document-operations`: it may appear if you directly reference - a document by its `_key`; in this case no AQL will be executed on the DB-Servers, instead - the coordinator will directly work with the documents on the DB-Servers. -* `distribute-in-cluster`: will appear when query parts get distributed in a cluster. - This is not an optimization rule, and it cannot be turned off. -* `scatter-in-cluster`: will appear when scatter, gather, and remote nodes are inserted - into a distributed query. This is not an optimization rule, and it cannot be turned off. -* `distribute-filtercalc-to-cluster`: will appear when filters are moved up in a - distributed execution plan. Filters are moved as far up in the plan as possible to - make result sets as small as possible as early as possible. -* `distribute-sort-to-cluster`: will appear if sorts are moved up in a distributed query. - Sorts are moved as far up in the plan as possible to make result sets as small as possible - as early as possible. 
-* `remove-unnecessary-remote-scatter`: will appear if a RemoteNode is followed by a - ScatterNode, and the ScatterNode is only followed by calculations or the SingletonNode. - In this case, there is no need to distribute the calculation, and it will be handled - centrally. -* `undistribute-remove-after-enum-coll`: will appear if a RemoveNode can be pushed into - the same query part that enumerates over the documents of a collection. This saves - inter-cluster roundtrips between the EnumerateCollectionNode and the RemoveNode. -* `collect-in-cluster`: will appear when a *CollectNode* on a coordinator is accompanied - by extra *CollectNode*s on the database servers, which will do the heavy processing and - allow the *CollectNode* on the coordinator to a light-weight aggregation only. -* `restrict-to-single-shard`: will appear if a collection operation (IndexNode or a - data-modification node) will only affect a single shard, and the operation can be - restricted to the single shard and is not applied for all shards. This optimization - can be applied for queries that access a collection only once in the query, and that - do not use traversals, shortest path queries and that do not access collection data - dynamically using the `DOCUMENT`, `FULLTEXT`, `NEAR` or `WITHIN` AQL functions. - Additionally, the optimizer will only pull off this optimization if can safely - determine the values of all the collection's shard keys from the query, and when the - shard keys are covered by a single index (this is always true if the shard key is - the default `_key`). -* `smart-joins`: will appear when the query optimizer can reduce an inter-node join - to a server-local join. This rule is only active in the *Enterprise Edition* of - ArangoDB, and will only be employed when joining two collections with identical - sharding setup via their shard keys. - -Note that some rules may appear multiple times in the list, with number suffixes. -This is due to the same rule being applied multiple times, at different positions -in the optimizer pipeline. - -### Additional optimizations applied - -If a query iterates over a collection (for filtering or counting) but does not need -the actual document values later, the optimizer can apply a "scan-only" optimization -for *EnumerateCollectionNode*s and *IndexNode*s. In this case, it will not build up -a result with the document data at all, which may reduce work significantly especially -with the RocksDB storage engine. In case the document data is actually not needed -later on, it may be sensible to remove it from query strings so the optimizer can -apply the optimization. - -If the optimization is applied, it will show up as "scan only" in an AQL -query's execution plan for an *EnumerateCollectionNode* or an *IndexNode*. - - -Additionally, the optimizer can apply an "index-only" optimization for AQL queries that -can satisfy the retrieval of all required document attributes directly from an index. - -This optimization will be triggered for the RocksDB engine if an index is used -that covers all required attributes of the document used later on in the query. -If applied, it will save retrieving the actual document data (which would require -an extra lookup in RocksDB), but will instead build the document data solely -from the index values found. It will only be applied when using up to 5 attributes -from the document, and only if the rest of the document data is not used later -on in the query. 
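For instance, a query of the following shape could be answered from the skiplist index on `value` alone, because only the indexed attribute is filtered on and returned (a sketch based on the `test` collection used in the earlier examples):

```js
FOR i IN test
  FILTER i.value > 97
  RETURN i.value
```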
- -The optimization is currently available for the RocksDB engine for the index types -primary, edge, hash, skiplist and persistent. - -If the optimization is applied, it will show up as "index only" in an AQL -query's execution plan for an *IndexNode*. diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md b/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md deleted file mode 100644 index 36d88173f435..000000000000 --- a/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md +++ /dev/null @@ -1,25 +0,0 @@ -Parsing queries -=============== - -Clients can use ArangoDB to check if a given AQL query is syntactically valid. ArangoDB provides -an [HTTP REST API](../../HTTP/AqlQuery/index.html) for this. - -A query can also be parsed from the ArangoShell using `ArangoStatement`'s `parse` method. The -`parse` method will throw an exception if the query is syntactically invalid. Otherwise, it will -return the some information about the query. - -The return value is an object with the collection names used in the query listed in the -`collections` attribute, and all bind parameters listed in the `bindVars` attribute. -Additionally, the internal representation of the query, the query's abstract syntax tree, will -be returned in the `AST` attribute of the result. Please note that the abstract syntax tree -will be returned without any optimizations applied to it. - - @startDocuBlockInline 11_workWithAQL_parseQueries - @EXAMPLE_ARANGOSH_OUTPUT{11_workWithAQL_parseQueries} - |var stmt = db._createStatement( - "FOR doc IN @@collection FILTER doc.foo == @bar RETURN doc"); - stmt.parse(); - ~removeIgnoreCollection("mycollection") - ~db._drop("mycollection") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 11_workWithAQL_parseQueries diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md deleted file mode 100644 index a35b45bef247..000000000000 --- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md +++ /dev/null @@ -1,235 +0,0 @@ -The AQL query results cache -=========================== - -AQL provides an optional query results cache. - -The purpose of the query results cache is to avoid repeated calculation of the same -query results. It is useful if data-reading queries repeat a lot and there are -not many write queries. - -The query results cache is transparent so users do not need to manually invalidate -results in it if underlying collection data are modified. - - -Modes ------ - -The cache can be operated in the following modes: - -* `off`: the cache is disabled. No query results will be stored -* `on`: the cache will store the results of all AQL queries unless their `cache` - attribute flag is set to `false` -* `demand`: the cache will store the results of AQL queries that have their - `cache` attribute set to `true`, but will ignore all others - -The mode can be set at server startup and later changed at runtime. - - -Query eligibility ------------------ - -The query results cache will consider two queries identical if they have exactly the -same query string and the same bind variables. Any deviation in terms of whitespace, -capitalization etc. will be considered a difference. The query string will be hashed -and used as the cache lookup key. If a query uses bind parameters, these will also be hashed -and used as part of the cache lookup key. 
- -That means even if the query strings of two queries are identical, the query results -cache will treat them as different queries if they have different bind parameter -values. Other components that will become part of a query's cache key are the -`count`, `fullCount` and `optimizer` attributes. - -If the cache is turned on, the cache will check at the very start of execution -whether it has a result ready for this particular query. If that is the case, -the query result will be served directly from the cache, which is normally -very efficient. If the query cannot be found in the cache, it will be executed -as usual. - -If the query is eligible for caching and the cache is turned on, the query -result will be stored in the query results cache so it can be used for subsequent -executions of the same query. - -A query is eligible for caching only if all of the following conditions are met: - -* the server the query executes on is not a coordinator -* the query string is at least 8 characters long -* the query is a read-only query and does not modify data in any collection -* no warnings were produced while executing the query -* the query is deterministic and only uses deterministic functions whose results - are marked as cacheable -* the size of the query result does not exceed the cache's configured maximal - size for individual cache results or cumulated results -* the query is not executed using a streaming cursor - -The usage of non-deterministic functions leads to a query not being cachable. -This is intentional to avoid caching of function results which should rather -be calculated on each invocation of the query (e.g. `RAND()` or `DATE_NOW()`). - -The query results cache considers all user-defined AQL functions to be non-deterministic -as it has no insight into these functions. - - -Cache invalidation ------------------- - -The cached results are fully or partially invalidated automatically if -queries modify the data of collections that were used during the computation of -the cached query results. This is to protect users from getting stale results -from the query results cache. - -This also means that if the cache is turned on, then there is an additional -cache invalidation check for each data-modification operation (e.g. insert, update, -remove, truncate operations as well as AQL data-modification queries). - -**Example** - -If the result of the following query is present in the query results cache, -then either modifying data in collection `users` or in collection `organizations` -will remove the already computed result from the cache: - -``` -FOR user IN users - FOR organization IN organizations - FILTER user.organization == organization._key - RETURN { user: user, organization: organization } -``` - -Modifying data in other collections than the named two will not lead to this -query result being removed from the cache. - - -Performance considerations --------------------------- - -The query results cache is organized as a hash table, so looking up whether a query result -is present in the cache is relatively fast. Still, the query string and the bind -parameter used in the query will need to be hashed. This is a slight overhead that -will not be present if the cache is turned off or a query is marked as not cacheable. - -Additionally, storing query results in the cache and fetching results from the -cache requires locking via an R/W lock. While many thread can read in parallel from -the cache, there can only be a single modifying thread at any given time. 
Modifications
-of the query cache contents are required when a query result is stored in the cache
-or during cache invalidation after data-modification operations. Cache invalidation
-will require time proportional to the number of cached items that need to be invalidated.
-
-There may be workloads in which enabling the query results cache will lead to a performance
-degradation. It is not recommended to turn the query results cache on in workloads that only
-modify data, or that modify data more often than reading it. Turning on the cache
-will also provide no benefit if queries are very diverse and do not repeat often.
-In read-only or read-mostly workloads, the cache will be beneficial if the same
-queries are repeated lots of times.
-
-In general, the query results cache will provide the biggest improvements for queries with
-small result sets that take a long time to calculate. If query results are very big and
-most of the query time is spent on copying the result from the cache to the client,
-then the cache will not provide much benefit.
-
-
-Global configuration
---------------------
-
-The query results cache can be configured at server start using the configuration parameter
-`--query.cache-mode`. This will set the cache mode according to the descriptions
-above.
-
-After the server is started, the cache mode can be changed at runtime as follows:
-
-```
-require("@arangodb/aql/cache").properties({ mode: "on" });
-```
-
-The maximum number of cached results in the cache for each database can be configured
-at server start using the following configuration parameters:
-
-* `--query.cache-entries`: maximum number of results in query result cache per database
-* `--query.cache-entries-max-size`: maximum cumulated size of results in query result cache per database
-* `--query.cache-entry-max-size`: maximum size of an individual result entry in query result cache
-* `--query.cache-include-system-collections`: whether or not to include system collection queries in the query result cache
-
-These parameters can be used to put an upper bound on the number and size of query
-results in each database's query cache and thus restrict the cache's memory consumption.
-
-These values can also be adjusted at runtime as follows:
-
-```
-require("@arangodb/aql/cache").properties({
-  maxResults: 200,
-  maxResultsSize: 8 * 1024 * 1024,
-  maxEntrySize: 1024 * 1024,
-  includeSystem: false
-});
-```
-
-The above will limit the number of cached results in the query results cache to 200
-results per database, and to 8 MB cumulated query result size per database. The maximum
-size of each query cache entry is restricted to 1 MB. Queries that involve system
-collections are excluded from caching.
-
-
-Per-query configuration
------------------------
-
-When a query is sent to the server for execution and the cache is set to `on` or `demand`,
-the query executor will look into the query's `cache` attribute. If the query cache mode is
-`on`, then not setting this attribute or setting it to anything but `false` will make the
-query executor consult the query cache. If the query cache mode is `demand`, then setting
-the `cache` attribute to `true` will make the executor look for the query in the query cache.
-When the query cache mode is `off`, the executor will not look for the query in the cache.
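A minimal arangosh sketch of the `demand` mode behavior described above (assuming a *users* collection exists):

```js
// sketch of "demand" mode: only queries that opt in are considered for caching
var cache = require("@arangodb/aql/cache");
cache.properties({ mode: "demand" });

// not considered for caching: the query does not set its cache attribute
db._query("FOR u IN users RETURN u.name");

// eligible for caching: the cache attribute is set to true
db._query({
  query: "FOR u IN users RETURN u.name",
  cache: true
});
```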
- -The `cache` attribute can be set as follows via the `db._createStatement()` function: - -``` -var stmt = db._createStatement({ - query: "FOR doc IN users LIMIT 5 RETURN doc", - cache: true /* cache attribute set here */ -}); - -stmt.execute(); -``` - -When using the `db._query()` function, the `cache` attribute can be set as follows: - -``` -db._query({ - query: "FOR doc IN users LIMIT 5 RETURN doc", - cache: true /* cache attribute set here */ -}); -``` - -The `cache` attribute can be set via the HTTP REST API `POST /_api/cursor`, too. - -Each query result returned will contain a `cached` attribute. This will be set to `true` -if the result was retrieved from the query cache, and `false` otherwise. Clients can use -this attribute to check if a specific query was served from the cache or not. - - -Query results cache inspection ------------------------------- - -The contents of the query results cache can be checked at runtime using the cache's -`toArray()` function: - -``` -require("@arangodb/aql/cache").toArray(); -``` - -This will return a list of all query results stored in the current database's query -results cache. - -The query results cache for the current database can be cleared at runtime using the -cache's `clear` function: - -``` -require("@arangodb/aql/cache").clear(); -``` - - -Restrictions ------------- - -Query results that are returned from the query results cache may contain execution statistics -stemming from the initial, uncached query execution. This means for a cached query results, -the *extra.stats* attribute may contain stale data, especially in terms of the *executionTime* -and *profile* attribute values. - diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md deleted file mode 100644 index 84c1702abb9b..000000000000 --- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md +++ /dev/null @@ -1,221 +0,0 @@ -Profiling and Hand-Optimizing AQL queries -========================================= - -To give you more insight into your query ArangoDB allows to execute your query -with special instrumentation code enabled. This will then print a query plan -with detailed execution statistics. - -To use this in an interactive fashion on the shell you can use -`db._profileQuery(..)` in _arangosh_. Alternatively, there is a button -_Profile_ in the Query tab of the web interface. - -The printed execution plan then contains three additional columns: - -- **Call**: The number of times this query stage was executed -- **Items**: The number of temporary result rows at this stage -- **Runtime**: The total time spent in this stage - -Below the execution plan there are additional sections for the overall runtime -statistics and the query profile. - -Example: Simple AQL query -------------------------- - -Assuming we got a collection named `acollection` and insert 10000 documents -via `for (let i=0; i < 10000;i++) db.acollection.insert({value:i})`. 
-Then a simple query filtering for `value < 10` will return 10 results:
-
-@startDocuBlockInline 01_workWithAQL_profileQuerySimple
-@EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_profileQuerySimple}
-~db._drop("acollection");
-~db._create('acollection');
-~for (let i=0; i < 10000; i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|FOR doc IN acollection
-|  FILTER doc.value < 10
-|  RETURN doc`, {}, {colors: false}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 01_workWithAQL_profileQuerySimple
-
-An AQL query is essentially executed in a pipeline that chains together different
-functional execution blocks. Each block gets the input rows from the parent above
-it, does some processing and then outputs a certain number of output rows.
-
-Without any detailed insight into the query execution it is impossible to tell
-how many results each pipeline-block had to work on and how long this took.
-By executing the query with the query profiler (`db._profileQuery()` or via
-the _Profile_ button in the web interface) you can check exactly how much work
-each stage had to do.
-
-Without any indexes this query should have to perform the following operations:
-
-1. Perform a full collection scan via an _EnumerateCollectionNode_ and output
-   a row containing the document in `doc`.
-2. Calculate the boolean expression `LET #1 = doc.value < 10` from all inputs
-   via a _CalculationNode_
-3. Filter out all input rows where `#1` is false via the _FilterNode_
-4. Put the `doc` variable of the remaining rows into the result set via
-   the _ReturnNode_
-
-The _EnumerateCollectionNode_ processed and returned all 10k rows (documents),
-as did the _CalculationNode_. Because the AQL execution engine also uses an
-internal batch size of 1000, these blocks were also called 10 times each.
-The _FilterNode_ as well as the _ReturnNode_ however only ever returned 10 rows
-and only had to be called once, because the result size fits within a single batch.
-
-Let us add a skiplist index on `value` to speed up the query:
-
-```js
-db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-```
-
-@startDocuBlockInline 02_workWithAQL_profileQuerySimpleIndex
-@EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_profileQuerySimpleIndex}
-~db._create('acollection');
-~db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-~for (let i=0; i < 10000; i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|FOR doc IN acollection
-|  FILTER doc.value < 10
-|  RETURN doc`, {}, {colors: false}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 02_workWithAQL_profileQuerySimpleIndex
-
-This results in replacing the collection scan and filter block with an
-`IndexNode`. The execution pipeline of the AQL query has become much shorter.
-Also the number of rows processed by each pipeline block is only 10, because
-we no longer need to look at all documents.
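If you only want to verify which execution nodes the optimizer produces, without actually running the query, you can also inspect the plan with `db._explain()` (see [Query execution plan](ExplainingQueries.md)); a short sketch, assuming the same `acollection` setup as above:

```js
// sketch: show the execution plan only, without executing the query;
// with the skiplist index in place the plan contains an IndexNode
db._explain(`
  FOR doc IN acollection
    FILTER doc.value < 10
    RETURN doc`, {}, {colors: false});
```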
-
-Example: AQL with Subquery
---------------------------
-
-Let us consider a query containing a subquery:
-
-@startDocuBlockInline 03_workWithAQL_profileQuerySubquery
-@EXAMPLE_ARANGOSH_OUTPUT{03_workWithAQL_profileQuerySubquery}
-~db._create('acollection');
-~db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-~for (let i=0; i < 10000;i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|LET list = (FOR doc in acollection FILTER doc.value > 90 RETURN doc)
-|FOR a IN list
-|   FILTER a.value < 91
-|   RETURN a`, {}, {colors: false, optimizer:{rules:["-all"]}}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 03_workWithAQL_profileQuerySubquery
-
-The resulting query profile contains a _SubqueryNode_ which has the runtime of
-all its children combined.
-
-Actually, we cheated a little. The optimizer would have completely removed the
-subquery if it had not been deactivated (`rules:["-all"]`). The optimized
-version would take longer in the "optimizing plan" stage, but should perform
-better with a lot of results.
-
-Example: AQL with Aggregation
------------------------------
-
-Let us try a more advanced query, using a [COLLECT](../Operations/Collect.md)
-statement. Assume we have a user collection with each document having a city,
-a username and an age attribute.
-
-The following query gets us all age groups in buckets (0-9, 10-19, 20-29, ...):
-
-@startDocuBlockInline 04_workWithAQL_profileQueryAggregation
-@EXAMPLE_ARANGOSH_OUTPUT{04_workWithAQL_profileQueryAggregation}
-~db._create('myusers');
-~["berlin", "paris", "cologne", "munich", "london"].forEach((c) => { ["peter", "david", "simon", "lars"].forEach( n => db.myusers.insert({ city : c, name : n, age: Math.floor(Math.random() * 75) }) ) });
-|db._profileQuery(`
-|FOR u IN myusers
-|  COLLECT ageGroup = FLOOR(u.age / 10) * 10
-|  AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age), len = LENGTH(u)
-|  RETURN {
-|    ageGroup,
-|    minAge,
-|    maxAge,
-|    len
-|  }`, {}, {colors: false}
-);
-~db._drop("myusers")
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 04_workWithAQL_profileQueryAggregation
-
-Without any indexes this query should have to perform the following operations:
-
-1. Perform a full collection scan via an _EnumerateCollectionNode_ and output
-   a row containing the document in `u`.
-2. Compute the expression `LET #1 = FLOOR(u.age / 10) * 10` for all inputs via
-   a _CalculationNode_
-3. Perform the aggregations via the _CollectNode_
-4. Sort the resulting aggregated rows via a _SortNode_
-5. Build a result value via another _CalculationNode_
-6. Put the result variable into the result set via the _ReturnNode_
-
-As in the example above, you can see that after the _CalculationNode_
-stage, from the originally 20 rows only a handful remained.
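The per-node figures that `db._profileQuery()` prints can also be retrieved programmatically by running the query with the `profile` option set to *2* and reading the extra data from the cursor (see [Query statistics](QueryStatistics.md)); a sketch, assuming the *myusers* collection from the example above:

```js
// sketch: collect per-node runtime statistics without the pretty-printed output
var extra = db._query({
  query: `FOR u IN myusers
            COLLECT ageGroup = FLOOR(u.age / 10) * 10
            RETURN ageGroup`,
  options: { profile: 2 }
}).getExtra();

extra.stats.nodes;  // per-node id, calls and items
extra.profile;      // time spent in the individual query phases
```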
-
-Typical AQL Performance Mistakes
---------------------------------
-
-With the new query profiler you should be able to spot typical performance
-mistakes that we see quite often:
-
-- Not employing indexes to speed up queries with common filter expressions
-- Not using shard keys in filter statements, when they are known
-  (only a cluster problem)
-- Using subqueries to calculate an intermediary result, but only using a
-  few results
-
-Bad example:
-
-```js
-LET vertices = (
-    FOR v IN 1..2 ANY @startVertex GRAPH 'my_graph'
-      // <-- add a LIMIT 1 here
-      RETURN v
- )
-FOR doc IN collection
-    FILTER doc.value == vertices[0].value
-    RETURN doc
-```
-
-Adding a `LIMIT 1` into the subquery should result in better performance,
-because the traversal can be stopped after the first result instead of
-computing all paths.
-
-Another mistake is to start a graph traversal from the wrong side
-(if both ends are known).
-
-Assume we have two vertex collections _users_ and _products_ as well as an
-edge collection _purchased_. The graph model looks like this:
-`(users) <--[purchased]--> (products)`, i.e. every user is connected with an
-edge in _purchased_ to zero or more _products_.
-
-If we want to know all users that have purchased the product _playstation_
-as well as products of `type` _legwarmer_ we could use this query:
-
-```js
-FOR prod IN products
-    FILTER prod.type == 'legwarmer'
-    FOR v,e,p IN 2..2 OUTBOUND prod purchased
-      FILTER v._key == 'playstation' // <-- last vertex of the path
-      RETURN p.vertices[1] // <-- the user
-```
-
-This query first finds all legwarmer products and then performs a traversal
-for each of them. But we could also reverse the traversal by starting off with
-the known _playstation_ product. This way we only need a single traversal
-to achieve the same result:
-
-```js
-FOR v,e,p IN 2..2 OUTBOUND 'products/playstation' purchased
-    FILTER v.type == 'legwarmer' // <-- last vertex of the path
-    RETURN p.vertices[1] // <-- the user
-```
-
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md
deleted file mode 100644
index 7cf74d2d2ad1..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md
+++ /dev/null
@@ -1,62 +0,0 @@
-Query statistics
-================
-
-A query that has been executed will always return execution statistics. Execution statistics
-can be retrieved by calling `getExtra()` on the cursor. The statistics are returned in the
-return value's `stats` attribute:
-
-    @startDocuBlockInline 06_workWithAQL_statementsExtra
-    @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statementsExtra}
-    |db._query(`
-    |  FOR i IN 1..@count INSERT
-    |    { _key: CONCAT('anothertest', TO_STRING(i)) }
-    |    INTO mycollection`,
-    |  {count: 100},
-    |  {},
-    |  {fullCount: true}
-    ).getExtra();
-    |db._query({
-    |  "query": `FOR i IN 200..@count INSERT
-    |    { _key: CONCAT('anothertest', TO_STRING(i)) }
-    |    INTO mycollection`,
-    |  "bindVars": {count: 300},
-    |  "options": { fullCount: true}
-    }).getExtra();
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock 06_workWithAQL_statementsExtra
-
-The meaning of the statistics attributes is as follows:
-
-* *writesExecuted*: the total number of data-modification operations successfully executed.
-  This is equivalent to the number of documents created, updated or removed by `INSERT`,
-  `UPDATE`, `REPLACE` or `REMOVE` operations.
-
-* *writesIgnored*: the total number of data-modification operations that were unsuccessful,
-  but have been ignored because of query option `ignoreErrors`.
-* *scannedFull*: the total number of documents iterated over when scanning a collection
-  without an index. Documents scanned by subqueries will be included in the result, but
-  not operations triggered by built-in or user-defined AQL functions.
-* *scannedIndex*: the total number of documents iterated over when scanning a collection using
-  an index. Documents scanned by subqueries will be included in the result, but
-  not operations triggered by built-in or user-defined AQL functions.
-* *filtered*: the total number of documents that were removed after executing a filter condition
-  in a `FilterNode`. Note that `IndexRangeNode`s can also filter documents by selecting only
-  the required index range from a collection, and the `filtered` value only indicates how much
-  filtering was done by `FilterNode`s.
-* *fullCount*: the total number of documents that matched the search condition if the query's
-  final top-level `LIMIT` statement were not present.
-  This attribute may only be returned if the `fullCount` option was set when starting the
-  query and will only contain a sensible value if the query contained a `LIMIT` operation on
-  the top level.
-* *peakMemoryUsage*: the maximum memory usage of the query while it was running. In a cluster,
-  the memory accounting is done per shard, and the memory usage reported is the peak
-  memory usage value from the individual shards.
-  Note that to keep things light-weight, the per-query memory usage is tracked on a relatively
-  high level, not including any memory allocator overhead nor any memory used for temporary
-  results calculations (e.g. memory allocated/deallocated inside AQL expressions and function
-  calls). The attribute *peakMemoryUsage* is available from v3.4.3.
-* *nodes*: _(optional)_ when the query was executed with the option `profile` set to at least *2*,
-  then this value contains runtime statistics per query execution node. This field contains the
-  node id (in `id`), the number of calls to this node (`calls`) and the number of items returned
-  by this node (`items`; items are the temporary results returned at this stage). You can correlate
-  these statistics with the `plan` returned in `extra`. For a human-readable output you can execute
-  `db._profileQuery(, )` in the arangosh.
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/README.md b/Documentation/Books/AQL/ExecutionAndPerformance/README.md
deleted file mode 100644
index d053099fac31..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-AQL Execution and Performance
-=============================
-
-This chapter describes AQL features related to query execution and query performance.
-
-* [Execution statistics](QueryStatistics.md): A query that has been executed also returns statistics about its execution.
-
-* [Query parsing](ParsingQueries.md): Clients can use ArangoDB to check if a given AQL query is syntactically valid.
-
-* [Query execution plan](ExplainingQueries.md): If it is unclear how a given query will perform, clients can retrieve a query's execution plan from the AQL query optimizer without actually executing the query; this is called explaining.
-
-* [The AQL query optimizer](Optimizer.md): AQL queries are sent through an optimizer before execution.
The task of the optimizer is to create an initial execution plan for the query, look for optimization opportunities and apply them. - -* [Query Profiling](QueryProfiler.md): Sometimes a query does not perform, but it is unclear which -parts of the plan are responsible. The query-profiler can show you execution statistics for every -stage of the query execution. - -* [The AQL query result cache](QueryCache.md): an optional query results cache can be used to avoid repeated calculation of the same query results. - -Be sure to check out the -[ArangoDB Performance Course](https://www.arangodb.com/arangodb-performance-course/) -for freshers as well. diff --git a/Documentation/Books/AQL/Extending/Conventions.md b/Documentation/Books/AQL/Extending/Conventions.md deleted file mode 100644 index 31e2fe83a18b..000000000000 --- a/Documentation/Books/AQL/Extending/Conventions.md +++ /dev/null @@ -1,119 +0,0 @@ -Conventions -=========== - -Naming ------- - -Built-in AQL functions that are shipped with ArangoDB reside in the namespace -*_aql*, which is also the default namespace to look in if an unqualified -function name is found. - -To refer to a user-defined AQL function, the function name must be fully -qualified to also include the user-defined namespace. The *::* symbol is used -as the namespace separator. Users can create a multi-level hierarchy of function -groups if required: - -```js -MYGROUP::MYFUNC() -MYFUNCTIONS::MATH::RANDOM() -``` - -**Note**: Adding user functions to the *_aql* namespace is disallowed and will -fail. - -User function names are case-insensitive like all function names in AQL. - -Variables and side effects --------------------------- - -User functions can take any number of input arguments and should -provide one result via a `return` statement. User functions should be kept -purely functional and thus free of side effects and state, and state modification. - -Modification of global variables is unsupported, as is reading or changing -the data of any collection from inside an AQL user function. - -User function code is late-bound, and may thus not rely on any variables -that existed at the time of declaration. If user function code requires -access to any external data, it must take care to set up the data by -itself. - -All AQL user function-specific variables should be introduced with the `var` -keyword in order to not accidentally access already defined variables from -outer scopes. Not using the `var` keyword for own variables may cause side -effects when executing the function. - -Here is an example that may modify outer scope variables `i` and `name`, -making the function **not** side-effect free: - -```js -function (values) { - for (i = 0; i < values.length; ++i) { - name = values[i]; - if (name === "foo") { - return i; - } - } - return null; -} -``` - -The above function can be made free of side effects by using the `var` or -`let` keywords, so the variables become function-local variables: - -```js -function (values) { - for (var i = 0; i < values.length; ++i) { - var name = values[i]; - if (name === "foo") { - return i; - } - } - return null; -} -``` - -Input parameters ----------------- - -In order to return a result, a user function should use a `return` instruction -rather than modifying its input parameters. - -AQL user functions are allowed to modify their input parameters for input -parameters that are null, boolean, numeric or string values. Modifying these -input parameter types inside a user function should be free of side effects. 
-However, user functions should not modify input parameters if the parameters are -arrays or objects and as such passed by reference, as that may modify variables -and state outside of the user function itself. - -Return values -------------- - -User functions must only return primitive types (i.e. *null*, boolean -values, numeric values, string values) or aggregate types (arrays or -objects) composed of these types. -Returning any other JavaScript object type (Function, Date, RegExp etc.) from -a user function may lead to undefined behavior and should be avoided. - -Enforcing strict mode ---------------------- - -By default, any user function code will be executed in *sloppy mode*, not -*strict* or *strong mode*. In order to make a user function run in strict -mode, use `"use strict"` explicitly inside the user function, e.g.: - -```js -function (values) { - "use strict" - - for (var i = 0; i < values.length; ++i) { - var name = values[i]; - if (name === "foo") { - return i; - } - } - return null; -} -``` - -Any violation of the strict mode will trigger a runtime error. diff --git a/Documentation/Books/AQL/Extending/Functions.md b/Documentation/Books/AQL/Extending/Functions.md deleted file mode 100644 index 58c593739a66..000000000000 --- a/Documentation/Books/AQL/Extending/Functions.md +++ /dev/null @@ -1,221 +0,0 @@ -Registering and Unregistering User Functions -============================================ - -User-defined functions (UDFs) can be registered in the selected database -using the *aqlfunctions* object as follows: - -```js -var aqlfunctions = require("@arangodb/aql/functions"); -``` - -To register a function, the fully qualified function name plus the -function code must be specified. This can easily be done in -[arangosh](../../Manual/Programs/Arangosh/index.html). The -[HTTP Interface](../../HTTP/AqlUserFunctions/index.html) also offers -User Functions management. - -In a cluster setup, make sure to connect to a coordinator to manage the UDFs. - -Documents in the *_aqlfunctions* collection (or any other system collection) -should not be accessed directly, but only via the dedicated interfaces. -Otherwise you might see caching issues or accidentally break something. -The interfaces will ensure the correct format of the documents and invalidate -the UDF cache. - -Registering an AQL user function --------------------------------- - -For testing, it may be sufficient to directly type the function code in the shell. -To manage more complex code, you may write it in the code editor of your choice -and save it as file. For example: - -```js -/* path/to/file.js */ -'use strict'; - -function greeting(name) { - if (name === undefined) { - name = "World"; - } - return `Hello ${name}!`; -} - -module.exports = greeting; -``` - -Then require it in the shell in order to register a user-defined function: - -``` -arangosh> var func = require("path/to/file.js"); -arangosh> aqlfunctions.register("HUMAN::GREETING", func, true); -``` - -Note that a return value of *false* means that the function `HUMAN::GREETING` -was newly created, and not that it failed to register. *true* is returned -if a function of that name existed before and was just updated. - -`aqlfunctions.register(name, code, isDeterministic)` - -Registers an AQL user function, identified by a fully qualified function -name. The function code in *code* must be specified as a JavaScript -function or a string representation of a JavaScript function. 
-If the function code in *code* is passed as a string, it is required that -the string evaluates to a JavaScript function definition. - -If a function identified by *name* already exists, the previous function -definition will be updated. Please also make sure that the function code -does not violate the [Conventions](Conventions.md) for AQL -functions. - -The *isDeterministic* attribute can be used to specify whether the -function results are fully deterministic (i.e. depend solely on the input -and are the same for repeated calls with the same input values). It is not -used at the moment but may be used for optimizations later. - -The registered function is stored in the selected database's system -collection *_aqlfunctions*. - -The function returns *true* when it updates/replaces an existing AQL -function of the same name, and *false* otherwise. It will throw an exception -when it detects syntactically invalid function code. - - -**Examples** - - -```js -require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT", -function (celsius) { - return celsius * 1.8 + 32; -}); -``` - -The function code will not be executed in *strict mode* or *strong mode* by -default. In order to make a user function being run in strict mode, use -`use strict` explicitly, e.g.: - -```js -require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT", -function (celsius) { - "use strict"; - return celsius * 1.8 + 32; -}); -``` - -You can access the name under which the AQL function is registered by accessing -the `name` property of `this` inside the JavaScript code: - -```js -require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT", -function (celsius) { - "use strict"; - if (typeof celsius === "undefined") { - const error = require("@arangodb").errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH; - AQL_WARNING(error.code, require("util").format(error.message, this.name, 1, 1)); - } - return celsius * 1.8 + 32; -}); -``` - -`AQL_WARNING()` is automatically available to the code of user-defined -functions. The error code and message is retrieved via `@arangodb` module. -The *argument number mismatch* message has placeholders, which we can substitute -using [format()](http://nodejs.org/api/util.html): - -``` -invalid number of arguments for function '%s()', expected number of arguments: minimum: %d, maximum: %d -``` - -In the example above, `%s` is replaced by `this.name` (the AQL function name), -and both `%d` placeholders by `1` (number of expected arguments). If you call -the function without an argument, you will see this: - -``` -arangosh> db._query("RETURN MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()") -[object ArangoQueryCursor, count: 1, hasMore: false, warning: 1541 - invalid -number of arguments for function 'MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()', -expected number of arguments: minimum: 1, maximum: 1] - -[ - null -] -``` - -Deleting an existing AQL user function --------------------------------------- - -`aqlfunctions.unregister(name)` - -Unregisters an existing AQL user function, identified by the fully qualified -function name. - -Trying to unregister a function that does not exist will result in an -exception. 
- - -**Examples** - - -```js -require("@arangodb/aql/functions").unregister("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT"); -``` - - -Unregister Group ----------------- - - - - -delete a group of AQL user functions -`aqlfunctions.unregisterGroup(prefix)` - -Unregisters a group of AQL user function, identified by a common function -group prefix. - -This will return the number of functions unregistered. - - -**Examples** - - -```js -require("@arangodb/aql/functions").unregisterGroup("MYFUNCTIONS::TEMPERATURE"); - -require("@arangodb/aql/functions").unregisterGroup("MYFUNCTIONS"); -``` - - -Listing all AQL user functions ------------------------------- - -`aqlfunctions.toArray()` - -Returns all previously registered AQL user functions, with their fully -qualified names and function code. - -The result may optionally be restricted to a specified group of functions -by specifying a group prefix: - -`aqlfunctions.toArray(prefix)` - - -**Examples** - -To list all available user functions: - -```js -require("@arangodb/aql/functions").toArray(); -``` - -To list all available user functions in the *MYFUNCTIONS* namespace: - -```js -require("@arangodb/aql/functions").toArray("MYFUNCTIONS"); -``` - -To list all available user functions in the *MYFUNCTIONS::TEMPERATURE* namespace: - -```js -require("@arangodb/aql/functions").toArray("MYFUNCTIONS::TEMPERATURE"); -``` diff --git a/Documentation/Books/AQL/Extending/README.md b/Documentation/Books/AQL/Extending/README.md deleted file mode 100644 index 8558a59d9b48..000000000000 --- a/Documentation/Books/AQL/Extending/README.md +++ /dev/null @@ -1,73 +0,0 @@ -Extending AQL with User Functions -================================= - -AQL comes with a [built-in set of functions](../Functions/README.md), but it is -not a fully-featured programming language. - -To add missing functionality or to simplify queries, users may add their own -functions to AQL in the selected database. These functions are written in -JavaScript, and are deployed via an API; see [Registering Functions](Functions.md). - -In order to avoid conflicts with existing or future built-in function names, -all user defined functions (**UDF**) have to be put into separate namespaces. -Invoking a UDF is then possible by referring to the fully-qualified function name, -which includes the namespace, too; see [Conventions](Conventions.md). - -Technical Details ------------------ - -### Known Limitations - -{% hint 'warning' %} -UDFs can have serious effects on the performance of your queries and the resource -usage in ArangoDB. Especially in cluster setups they should not be used against -much data, because this data will need to be sent over the network back and forth -between _DBservers_ and _Coordinators_, potentially adding a lot of latency. -This can be mitigated by very selective `FILTER`s before calls to UDFs. -{% endhint %} - -Since the optimizer doesn't know anything about the nature of your function, -**the optimizer can't use indices for UDFs**. So you should never lean on a UDF -as the primary criterion for a `FILTER` statement to reduce your query result set. -Instead, put a another `FILTER` statement in front of it. You should make sure -that this [**`FILTER` statement** is effective](../ExecutionAndPerformance/Optimizer.md) -to reduce the query result before passing it to your UDF. - -Rule of thumb is, the closer the UDF is to your final `RETURN` statement -(or maybe even inside it), the better. 
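For example, a query along the following lines keeps the selective, potentially index-backed `FILTER` first and only applies the UDF to the few remaining documents (the collection, attribute and UDF names are placeholders, reusing the `MYGROUP::MYFUNC` example from [Conventions](Conventions.md)):

```js
// sketch: reduce the result set with an indexable FILTER first,
// then apply the (hypothetical) UDF to the small remainder
db._query(`
  FOR doc IN collection
    FILTER doc.type == @type        /* selective, can use an index */
    FILTER MYGROUP::MYFUNC(doc)     /* UDF evaluated last */
    RETURN doc`, { type: "legwarmer" });
```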
- -When used in clusters, UDFs are always executed on the -[coordinator](../../Manual/Architecture/DeploymentModes/Cluster/Architecture.html). - -As UDFs are written in JavaScript, each query that executes a UDF will acquire -one V8 context to execute the UDFs in it. V8 contexts can be re-used across subsequent -queries, but when UDF-invoking queries run in parallel, they will each require a -dedicated V8 context. - -Using UDFs in clusters may thus result in a higher resource allocation -in terms of used V8 contexts and server threads. If you run out -of these resources, your query may abort with a -[**cluster backend unavailable**](../../Manual/Appendix/ErrorCodes.html) error. - -To overcome these mentioned limitations, you may want to increase the -[number of available V8 contexts](../../Manual/Programs/Arangod/Javascript.html#v8-contexts) -(at the expense of increased memory usage), and the -[number of available server threads](../../Manual/Programs/Arangod/Server.html#server-threads). - -### Deployment Details - -Internally, UDFs are stored in a system collection named `_aqlfunctions` -of the selected database. When an AQL statement refers to such a UDF, -it is loaded from that collection. The UDFs will be exclusively -available for queries in that particular database. - -Since the coordinator doesn't have own local collections, the `_aqlfunctions` -collection is sharded across the cluster. Therefore (as usual), it has to be -accessed through a coordinator - you mustn't talk to the shards directly. -Once it is in the `_aqlfunctions` collection, it is available on all -coordinators without additional effort. - -Keep in mind that system collections are excluded from dumps created with -[arangodump](../../Manual/Programs/Arangodump/index.html) by default. -To include AQL UDF in a dump, the dump needs to be started with -the option *--include-system-collections true*. diff --git a/Documentation/Books/AQL/FOOTER.html b/Documentation/Books/AQL/FOOTER.html deleted file mode 100644 index 239869bfaf6a..000000000000 --- a/Documentation/Books/AQL/FOOTER.html +++ /dev/null @@ -1 +0,0 @@ -© ArangoDB - the native multi-model NoSQL database \ No newline at end of file diff --git a/Documentation/Books/AQL/Functions/Array.md b/Documentation/Books/AQL/Functions/Array.md deleted file mode 100644 index 07118118c66c..000000000000 --- a/Documentation/Books/AQL/Functions/Array.md +++ /dev/null @@ -1,743 +0,0 @@ -# Array functions - -AQL provides functions for higher-level array manipulation. Also see the -[numeric functions](Numeric.md) for functions that work on number arrays. -If you want to concatenate the elements of an array equivalent to `join()` -in JavaScript, see [CONCAT()](String.md#concat) and -[CONCAT_SEPARATOR()](String.md#concatseparator) in the string functions chapter. - -Apart from that, AQL also offers several language constructs: - -- simple [array access](../Fundamentals/DataTypes.md#arrays--lists) of individual elements, -- [array operators](../Advanced/ArrayOperators.md) for array expansion and contraction, - optionally with inline filter, limit and projection, -- [array comparison operators](../Operators.md#array-comparison-operators) to compare - each element in an array to a value or the elements of another array, -- loop-based operations on arrays using [FOR](../Operations/For.md), - [SORT](../Operations/Sort.md), - [LIMIT](../Operations/Limit.md), - as well as [COLLECT](../Operations/Collect.md) for grouping, - which also offers efficient aggregation. 
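Many of the functions below can also be expressed with these language constructs; as a rough, illustration-only equivalent of [REMOVE_VALUE()](#removevalue), a `FOR` loop with a `FILTER` over an array literal can be run from arangosh like this:

```js
// sketch: loop-based alternative to REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a" )
db._query(`
  RETURN (
    FOR x IN [ "a", "b", "b", "a", "c" ]
      FILTER x != "a"
      RETURN x
  )`).toArray();
// => [ [ "b", "b", "c" ] ]
```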
- -## APPEND() - -`APPEND(anyArray, values, unique) → newArray` - -Add all elements of an array to another array. All values are added at the end of the -array (right side). - -It can also be used to append a single element to an array. It is not necessary to wrap -it in an array (unless it is an array itself). You may also use [PUSH()](#push) instead. - -- **anyArray** (array): array with elements of arbitrary type -- **values** (array|any): array, whose elements shall be added to *anyArray* -- **unique** (bool, *optional*): if set to *true*, only those *values* will be added - that are not already contained in *anyArray*. The default is *false*. -- returns **newArray** (array): the modified array - -**Examples** - -@startDocuBlockInline aqlArrayAppend_1 -@EXAMPLE_AQL{aqlArrayAppend_1} -RETURN APPEND([ 1, 2, 3 ], [ 5, 6, 9 ]) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayAppend_1 - -@startDocuBlockInline aqlArrayAppend_2 -@EXAMPLE_AQL{aqlArrayAppend_2} -RETURN APPEND([ 1, 2, 3 ], [ 3, 4, 5, 2, 9 ], true) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayAppend_2 - -## CONTAINS_ARRAY() - -This is an alias for [POSITION()](#position). - - -## COUNT() - -This is an alias for [LENGTH()](#length). - -## COUNT_DISTINCT() - -`COUNT_DISTINCT(anyArray) → number` - -Get the number of distinct elements in an array. - -- **anyArray** (array): array with elements of arbitrary type -- returns **number**: the number of distinct elements in *anyArray*. - -**Examples** - -@startDocuBlockInline aqlArrayCountDistinct_1 -@EXAMPLE_AQL{aqlArrayCountDistinct_1} -RETURN COUNT_DISTINCT([ 1, 2, 3 ]) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayCountDistinct_1 - -@startDocuBlockInline aqlArrayCountDistinct_2 -@EXAMPLE_AQL{aqlArrayCountDistinct_2} -RETURN COUNT_DISTINCT([ "yes", "no", "yes", "sauron", "no", "yes" ]) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayCountDistinct_2 - -## COUNT_UNIQUE() - -This is an alias for [COUNT_DISTINCT()](#countdistinct). - -## FIRST() - -`FIRST(anyArray) → firstElement` - -Get the first element of an array. It is the same as `anyArray[0]`. - -- **anyArray** (array): array with elements of arbitrary type -- returns **firstElement** (any|null): the first element of *anyArray*, or *null* if - the array is empty. - -**Examples** - -@startDocuBlockInline aqlArrayFirst_1 -@EXAMPLE_AQL{aqlArrayFirst_1} -RETURN FIRST([ 1, 2, 3 ]) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayFirst_1 - -@startDocuBlockInline aqlArrayFirst_2 -@EXAMPLE_AQL{aqlArrayFirst_2} -RETURN FIRST([]) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayFirst_2 - -## FLATTEN() - -`FLATTEN(anyArray, depth) → flatArray` - -Turn an array of arrays into a flat array. All array elements in *array* will be -expanded in the result array. Non-array elements are added as they are. The function -will recurse into sub-arrays up to the specified depth. Duplicates will not be removed. - -Also see [array contraction](../Advanced/ArrayOperators.md#array-contraction). 
- -- **array** (array): array with elements of arbitrary type, including nested arrays -- **depth** (number, *optional*): flatten up to this many levels, the default is 1 -- returns **flatArray** (array): a flattened array - -**Examples** - -@startDocuBlockInline aqlArrayFlatten_1 -@EXAMPLE_AQL{aqlArrayFlatten_1} -RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayFlatten_1 - -To fully flatten the example array, use a *depth* of 2: - -@startDocuBlockInline aqlArrayFlatten_2 -@EXAMPLE_AQL{aqlArrayFlatten_2} -RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ], 2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayFlatten_2 - -## INTERSECTION() - -`INTERSECTION(array1, array2, ... arrayN) → newArray` - -Return the intersection of all arrays specified. The result is an array of values that -occur in all arguments. - -Other set operations are [UNION()](#union), -[MINUS()](#minus) and -[OUTERSECTION()](#outersection). - -- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments - (at least 2) -- returns **newArray** (array): a single array with only the elements, which exist in all - provided arrays. The element order is random. Duplicates are removed. - -**Examples** - -@startDocuBlockInline aqlArrayIntersection_1 -@EXAMPLE_AQL{aqlArrayIntersection_1} -RETURN INTERSECTION( [1,2,3,4,5], [2,3,4,5,6], [3,4,5,6,7] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayIntersection_1 - -@startDocuBlockInline aqlArrayIntersection_2 -@EXAMPLE_AQL{aqlArrayIntersection_2} -RETURN INTERSECTION( [2,4,6], [8,10,12], [14,16,18] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayIntersection_2 - -## LAST() - -`LAST(anyArray) → lastElement` - -Get the last element of an array. It is the same as `anyArray[-1]`. - -- **anyArray** (array): array with elements of arbitrary type -- returns **lastElement** (any|null): the last element of *anyArray* or *null* if the - array is empty. - -**Example** - -@startDocuBlockInline aqlArrayLast_1 -@EXAMPLE_AQL{aqlArrayLast_1} -RETURN LAST( [1,2,3,4,5] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLast_1 - -## LENGTH() - -`LENGTH(anyArray) → length` - -Determine the number of elements in an array. - -- **anyArray** (array): array with elements of arbitrary type -- returns **length** (number): the number of array elements in *anyArray*. - -*LENGTH()* can also determine the [number of attribute keys](Document.md#length) -of an object / document, the [amount of documents](Miscellaneous.md#length) in a -collection and the [character length](String.md#length) of a string. 
- -|input|length| -|---|---| -|String|number of unicode characters| -|Number|number of unicode characters that represent the number| -|Array|number of elements| -|Object|number of first level elements| -|true|1| -|false|0| -|null|0| - -**Examples** - -@startDocuBlockInline aqlArrayLength_1 -@EXAMPLE_AQL{aqlArrayLength_1} -RETURN LENGTH( "🥑" ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLength_1 - -@startDocuBlockInline aqlArrayLength_2 -@EXAMPLE_AQL{aqlArrayLength_2} -RETURN LENGTH( 1234 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLength_2 - -@startDocuBlockInline aqlArrayLength_3 -@EXAMPLE_AQL{aqlArrayLength_3} -RETURN LENGTH( [1,2,3,4,5,6,7] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLength_3 - -@startDocuBlockInline aqlArrayLength_4 -@EXAMPLE_AQL{aqlArrayLength_4} -RETURN LENGTH( [1,2,3,4,5,6,7] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLength_4 - -@startDocuBlockInline aqlArrayLength_5 -@EXAMPLE_AQL{aqlArrayLength_5} -RETURN LENGTH( {a:1, b:2, c:3, d:4, e:{f:5,g:6}} ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayLength_5 - -## MINUS() - -`MINUS(array1, array2, ... arrayN) → newArray` - -Return the difference of all arrays specified. - -Other set operations are [UNION()](#union), -[INTERSECTION()](#intersection) and -[OUTERSECTION()](#outersection). - -- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple - arguments (at least 2) -- returns **newArray** (array): an array of values that occur in the first array, - but not in any of the subsequent arrays. The order of the result array is undefined - and should not be relied on. Duplicates will be removed. - -**Example** - -@startDocuBlockInline aqlArrayMinus_1 -@EXAMPLE_AQL{aqlArrayMinus_1} -RETURN MINUS( [1,2,3,4], [3,4,5,6], [5,6,7,8] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayMinus_1 - -## NTH() - -`NTH(anyArray, position) → nthElement` - -Get the element of an array at a given position. It is the same as `anyArray[position]` -for positive positions, but does not support negative positions. - -- **anyArray** (array): array with elements of arbitrary type -- **position** (number): position of desired element in array, positions start at 0 -- returns **nthElement** (any|null): the array element at the given *position*. - If *position* is negative or beyond the upper bound of the array, - then *null* will be returned. - -**Examples** - -@startDocuBlockInline aqlArrayNth_1 -@EXAMPLE_AQL{aqlArrayNth_1} -RETURN NTH( [ "foo", "bar", "baz" ], 2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayNth_1 - -@startDocuBlockInline aqlArrayNth_2 -@EXAMPLE_AQL{aqlArrayNth_2} -RETURN NTH( [ "foo", "bar", "baz" ], 3 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayNth_2 - -@startDocuBlockInline aqlArrayNth_3 -@EXAMPLE_AQL{aqlArrayNth_3} -RETURN NTH( [ "foo", "bar", "baz" ], -1 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayNth_3 - -## OUTERSECTION() - -`OUTERSECTION(array1, array2, ... arrayN) → newArray` - -Return the values that occur only once across all arrays specified. - -Other set operations are [UNION()](#union), -[MINUS()](#minus) and -[INTERSECTION()](#intersection). - -- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments - (at least 2) -- returns **newArray** (array): a single array with only the elements that exist only once - across all provided arrays. The element order is random. 
- -**Example** - -@startDocuBlockInline aqlArrayOutersection_1 -@EXAMPLE_AQL{aqlArrayOutersection_1} -RETURN OUTERSECTION( [ 1, 2, 3 ], [ 2, 3, 4 ], [ 3, 4, 5 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayOutersection_1 - -## POP() - -`POP(anyArray) → newArray` - -Remove the last element of *array*. - -To append an element (right side), see [PUSH()](#push).
-To remove the first element, see [SHIFT()](#shift).
-To remove an element at an arbitrary position, see [REMOVE_NTH()](#removenth). - -- **anyArray** (array): an array with elements of arbitrary type -- returns **newArray** (array): *anyArray* without the last element. If it's already - empty or has only a single element left, an empty array is returned. - -**Examples** - -@startDocuBlockInline aqlArrayPop_1 -@EXAMPLE_AQL{aqlArrayPop_1} -RETURN POP( [ 1, 2, 3, 4 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPop_1 - -@startDocuBlockInline aqlArrayPop_2 -@EXAMPLE_AQL{aqlArrayPop_2} -RETURN POP( [ 1 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPop_2 - -## POSITION() - -`POSITION(anyArray, search, returnIndex) → position` - -Return whether *search* is contained in *array*. Optionally return the position. - -- **anyArray** (array): the haystack, an array with elements of arbitrary type -- **search** (any): the needle, an element of arbitrary type -- **returnIndex** (bool, *optional*): if set to *true*, the position of the match - is returned instead of a boolean. The default is *false*. -- returns **position** (bool|number): *true* if *search* is contained in *anyArray*, - *false* otherwise. If *returnIndex* is enabled, the position of the match is - returned (positions start at 0), or *-1* if it's not found. - -To determine if or at which position a string occurs in another string, see the -[CONTAINS() string function](String.md#contains). - -**Examples** - -@startDocuBlockInline aqlArrayPosition_1 -@EXAMPLE_AQL{aqlArrayPosition_1} -RETURN POSITION( [2,4,6,8], 4 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPosition_1 - -@startDocuBlockInline aqlArrayPosition_2 -@EXAMPLE_AQL{aqlArrayPosition_2} -RETURN POSITION( [2,4,6,8], 4, true ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPosition_2 - -## PUSH() - -`PUSH(anyArray, value, unique) → newArray` - -Append *value* to *anyArray* (right side). - -To remove the last element, see [POP()](#pop).
-To prepend a value (left side), see [UNSHIFT()](#unshift).
-To append multiple elements, see [APPEND()](#append). - -- **anyArray** (array): array with elements of arbitrary type -- **value** (any): an element of arbitrary type -- **unique** (bool): if set to *true*, then *value* is not added if already - present in the array. The default is *false*. -- returns **newArray** (array): *anyArray* with *value* added at the end - (right side) - -Note: The *unique* flag only controls if *value* is added if it's already present -in *anyArray*. Duplicate elements that already exist in *anyArray* will not be -removed. To make an array unique, use the [UNIQUE()](#unique) function. - -**Examples** - -@startDocuBlockInline aqlArrayPush_1 -@EXAMPLE_AQL{aqlArrayPush_1} -RETURN PUSH([ 1, 2, 3 ], 4) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPush_1 - -@startDocuBlockInline aqlArrayPush_2 -@EXAMPLE_AQL{aqlArrayPush_2} -RETURN PUSH([ 1, 2, 2, 3 ], 2, true) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayPush_2 - -## REMOVE_NTH() - -`REMOVE_NTH(anyArray, position) → newArray` - -Remove the element at *position* from the *anyArray*. - -To remove the first element, see [SHIFT()](#shift).
-To remove the last element, see [POP()](#pop). - -- **anyArray** (array): array with elements of arbitrary type -- **position** (number): the position of the element to remove. Positions start - at 0. Negative positions are supported, with -1 being the last array element. - If *position* is out of bounds, the array is returned unmodified. -- returns **newArray** (array): *anyArray* without the element at *position* - -**Examples** - -@startDocuBlockInline aqlArrayRemoveNth_1 -@EXAMPLE_AQL{aqlArrayRemoveNth_1} -RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], 1 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayRemoveNth_1 - -@startDocuBlockInline aqlArrayRemoveNth_2 -@EXAMPLE_AQL{aqlArrayRemoveNth_2} -RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], -2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayRemoveNth_2 - -## REMOVE_VALUE() - -`REMOVE_VALUE(anyArray, value, limit) → newArray` - -Remove all occurrences of *value* in *anyArray*. Optionally with a *limit* -to the number of removals. - -- **anyArray** (array): array with elements of arbitrary type -- **value** (any): an element of arbitrary type -- **limit** (number, *optional*): cap the number of removals to this value -- returns **newArray** (array): *anyArray* with *value* removed - -**Examples** - -@startDocuBlockInline aqlArrayRemoveValue_1 -@EXAMPLE_AQL{aqlArrayRemoveValue_1} -RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a" ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayRemoveValue_1 - -@startDocuBlockInline aqlArrayRemoveValue_2 -@EXAMPLE_AQL{aqlArrayRemoveValue_2} -RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a", 1 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayRemoveValue_2 - -## REMOVE_VALUES() - -`REMOVE_VALUES(anyArray, values) → newArray` - -Remove all occurrences of any of the *values* from *anyArray*. - -- **anyArray** (array): array with elements of arbitrary type -- **values** (array): an array with elements of arbitrary type, that shall - be removed from *anyArray* -- returns **newArray** (array): *anyArray* with all individual *values* removed - -**Example** - -@startDocuBlockInline aqlArrayRemoveValues_1 -@EXAMPLE_AQL{aqlArrayRemoveValues_1} -RETURN REMOVE_VALUES( [ "a", "a", "b", "c", "d", "e", "f" ], [ "a", "f", "d" ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayRemoveValues_1 - -## REVERSE() - -`REVERSE(anyArray) → reversedArray` - -Return an array with its elements reversed. - -- **anyArray** (array): array with elements of arbitrary type -- returns **reversedArray** (array): a new array with all elements of *anyArray* in - reversed order - -**Example** - -@startDocuBlockInline aqlArrayReverse_1 -@EXAMPLE_AQL{aqlArrayReverse_1} -RETURN REVERSE ( [2,4,6,8,10] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayReverse_1 - -## SHIFT() - -`SHIFT(anyArray) → newArray` - -Remove the first element of *anyArray*. - -To prepend an element (left side), see [UNSHIFT()](#unshift).
-To remove the last element, see [POP()](#pop).
-To remove an element at an arbitrary position, see [REMOVE_NTH()](#removenth). - -- **anyArray** (array): array with elements with arbitrary type -- returns **newArray** (array): *anyArray* without the left-most element. If *anyArray* - is already empty or has only one element left, an empty array is returned. - -**Examples** - -@startDocuBlockInline aqlArrayShift_1 -@EXAMPLE_AQL{aqlArrayShift_1} -RETURN SHIFT( [ 1, 2, 3, 4 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayShift_1 - -@startDocuBlockInline aqlArrayShift_2 -@EXAMPLE_AQL{aqlArrayShift_2} -RETURN SHIFT( [ 1 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayShift_2 - -## SLICE() - -`SLICE(anyArray, start, length) → newArray` - -Extract a slice of *anyArray*. - -- **anyArray** (array): array with elements of arbitrary type -- **start** (number): start extraction at this element. Positions start at 0. - Negative values indicate positions from the end of the array. -- **length** (number, *optional*): extract up to *length* elements, or all - elements from *start* up to *length* if negative (exclusive) -- returns **newArray** (array): the specified slice of *anyArray*. If *length* - is not specified, all array elements starting at *start* will be returned. - -**Examples** - -@startDocuBlockInline aqlArraySlice_1 -@EXAMPLE_AQL{aqlArraySlice_1} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, 1 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_1 - -@startDocuBlockInline aqlArraySlice_2 -@EXAMPLE_AQL{aqlArraySlice_2} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, 2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_2 - -@startDocuBlockInline aqlArraySlice_3 -@EXAMPLE_AQL{aqlArraySlice_3} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 3 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_3 - -@startDocuBlockInline aqlArraySlice_4 -@EXAMPLE_AQL{aqlArraySlice_4} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, -1 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_4 - -@startDocuBlockInline aqlArraySlice_5 -@EXAMPLE_AQL{aqlArraySlice_5} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, -2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_5 - -@startDocuBlockInline aqlArraySlice_6 -@EXAMPLE_AQL{aqlArraySlice_6} -RETURN SLICE( [ 1, 2, 3, 4, 5 ], -3, 2 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySlice_6 - -## SORTED() - -`SORTED(anyArray) → newArray` - -Sort all elements in *anyArray*. The function will use the default comparison -order for AQL value types. - -- **anyArray** (array): array with elements of arbitrary type -- returns **newArray** (array): *anyArray*, with elements sorted - -**Example** - -@startDocuBlockInline aqlArraySorted_1 -@EXAMPLE_AQL{aqlArraySorted_1} -RETURN SORTED( [ 8,4,2,10,6 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySorted_1 - -## SORTED_UNIQUE() - -`SORTED_UNIQUE(anyArray) → newArray` - -Sort all elements in *anyArray*. The function will use the default comparison -order for AQL value types. Additionally, the values in the result array will -be made unique. - -- **anyArray** (array): array with elements of arbitrary type -- returns **newArray** (array): *anyArray*, with elements sorted and duplicates - removed - -**Example** - -@startDocuBlockInline aqlArraySortedUnique_1 -@EXAMPLE_AQL{aqlArraySortedUnique_1} -RETURN SORTED_UNIQUE( [ 8,4,2,10,6,2,8,6,4 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArraySortedUnique_1 - -## UNION() - -`UNION(array1, array2, ... arrayN) → newArray` - -Return the union of all arrays specified. - -Other set operations are [MINUS()](#minus), -[INTERSECTION()](#intersection) and -[OUTERSECTION()](#outersection). 
- -- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple - arguments (at least 2) -- returns **newArray** (array): all array elements combined in a single array, - in any order - -**Examples** - -@startDocuBlockInline aqlArrayUnion_1 -@EXAMPLE_AQL{aqlArrayUnion_1} -RETURN UNION( - [ 1, 2, 3 ], - [ 1, 2 ] -) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnion_1 - -Note: No duplicates will be removed. In order to remove duplicates, please use -either [UNION_DISTINCT()](#uniondistinct) -or apply [UNIQUE()](#unique) on the -result of *UNION()*: - -@startDocuBlockInline aqlArrayUnion_2 -@EXAMPLE_AQL{aqlArrayUnion_2} -RETURN UNIQUE( - UNION( - [ 1, 2, 3 ], - [ 1, 2 ] - ) -) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnion_2 - -## UNION_DISTINCT() - -`UNION_DISTINCT(array1, array2, ... arrayN) → newArray` - -Return the union of distinct values of all arrays specified. - -- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple - arguments (at least 2) -- returns **newArray** (array): the elements of all given arrays in a single - array, without duplicates, in any order - -**Example** - -@startDocuBlockInline aqlArrayUnionDistinct_1 -@EXAMPLE_AQL{aqlArrayUnionDistinct_1} -RETURN UNION_DISTINCT( - [ 1, 2, 3 ], - [ 1, 2 ] -) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnionDistinct_1 - -## UNIQUE() - -`UNIQUE(anyArray) → newArray` - -Return all unique elements in *anyArray*. To determine uniqueness, the -function will use the comparison order. - -- **anyArray** (array): array with elements of arbitrary type -- returns **newArray** (array): *anyArray* without duplicates, in any order - -**Example** - -@startDocuBlockInline aqlArrayUnique_1 -@EXAMPLE_AQL{aqlArrayUnique_1} -RETURN UNIQUE( [ 1,2,2,3,3,3,4,4,4,4,5,5,5,5,5 ] ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnique_1 - -## UNSHIFT() - -`UNSHIFT(anyArray, value, unique) → newArray` - -Prepend *value* to *anyArray* (left side). - -To remove the first element, see [SHIFT()](#shift).
-To append a value (right side), see [PUSH()](#push). - -- **anyArray** (array): array with elements of arbitrary type -- **value** (any): an element of arbitrary type -- **unique** (bool): if set to *true*, then *value* is not added if already - present in the array. The default is *false*. -- returns **newArray** (array): *anyArray* with *value* added at the start - (left side) - -Note: The *unique* flag only controls if *value* is added if it's already present -in *anyArray*. Duplicate elements that already exist in *anyArray* will not be -removed. To make an array unique, use the [UNIQUE()](#unique) function. - -**Examples** - -@startDocuBlockInline aqlArrayUnshift_1 -@EXAMPLE_AQL{aqlArrayUnshift_1} -RETURN UNSHIFT( [ 1, 2, 3 ], 4 ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnshift_1 - -@startDocuBlockInline aqlArrayUnshift_2 -@EXAMPLE_AQL{aqlArrayUnshift_2} -RETURN UNSHIFT( [ 1, 2, 3 ], 2, true ) -@END_EXAMPLE_AQL -@endDocuBlock aqlArrayUnshift_2 diff --git a/Documentation/Books/AQL/Functions/Date.md b/Documentation/Books/AQL/Functions/Date.md deleted file mode 100644 index a9be5f176ab8..000000000000 --- a/Documentation/Books/AQL/Functions/Date.md +++ /dev/null @@ -1,697 +0,0 @@ -Date functions -============== - -AQL offers functionality to work with dates. Dates are no data types of their own in -AQL (neither are they in JSON, which is usually used as format to ship data into and -out of ArangoDB). Instead, dates in AQL are represented by either numbers or strings. - -All date function operations are done in the *unix time* system. Unix time counts -all non leap seconds beginning with January 1st 1970 00:00:00.000 UTC, also know as -the Unix epoch. A point in time is called timestamp. A timestamp has the same value -at every point on earth. The date functions use millisecond precision for timestamps. - -time unit definitions - -* millisecond: 1/1000 of a second -* second: one [SI second](https://www.bipm.org/en/publications/si-brochure/second.html) -* Minute: one minute is defined as 60 seconds -* Hour: one hour is defined as 60 minutes -* day: one day is defined as 24 hours -* week: one week is defined as 7 days -* year: one year is defined as 365.2425 days -* month: one month is defined as 1/12 of a year - -All functions that require dates as arguments accept the following input values: - -- numeric timestamps, millisecond precision; - An example timestamp value is *1399472349522*, which translates to - *2014-05-07T14:19:09.522Z*. - -- date time strings in formats *YYYY-MM-DDTHH:MM:SS.MMM*, - *YYYY-MM-DD HH:MM:SS.MMM* or *YYYY-MM-DD*; Milliseconds are always optional. - A time offset may optionally be added at the end of the string, with the - hours and minutes that need to be added or subtracted to the date time value. - For example, *2014-05-07T14:19:09+01:00* can be used to specify a one hour offset, - and *2014-05-07T14:19:09+07:30* can be specified for seven and half hours offset. - Negative offsets are also possible. Alternatively to an offset, a *Z* can be used - to indicate UTC / Zulu time. - - An example value is *2014-05-07T14:19:09.522Z* meaning May 7th 2014, 14:19:09 and - 522 milliseconds, UTC / Zulu time. Another example value without time component is - *2014-05-07Z*. - -```js -DATE_HOUR( 2 * 60 * 60 * 1000 ) // 2 -DATE_HOUR("1970-01-01T02:00:00") // 2 -``` - -You are free to store age determinations of specimens, incomplete or fuzzy dates and -the like in different, more appropriate ways of course. 
AQL's date functions will -most certainly not be of any help for such dates, but you can still use language -constructs like [SORT](../Operations/Sort.md) (which also supports sorting of arrays) -and [indexes](../../Manual/Indexing/index.html) like skiplists. - -Current date and time ---------------------- - -### DATE_NOW() - -`DATE_NOW() → timestamp` - -Get the current unix time as numeric timestamp. - -- returns **timestamp** (number): the current unix time as a timestamp. - The return value has millisecond precision. To convert the return value to - seconds, divide it by 1000. - -Note that this function is evaluated on every invocation and may return -different values when invoked multiple times in the same query. Assign it -to a variable to use the exact same timestamp multiple times. - -Conversion ----------- - -*DATE_TIMESTAMP()* and *DATE_ISO8601()* can be used to convert ISO 8601 date time -strings to numeric timestamps and numeric timestamps to ISO 8601 date time strings. - -Both also support individual date components as separate function arguments, -in the following order: - -- year -- month -- day -- hour -- minute -- second -- millisecond - -All components following *day* are optional and can be omitted. Note that no -time offset can be specified when using separate date components, and UTC / -Zulu time will be used. - -The following calls to *DATE_TIMESTAMP()* are equivalent and will all return -*1399472349522*: - -```js -DATE_TIMESTAMP("2014-05-07T14:19:09.522") -DATE_TIMESTAMP("2014-05-07T14:19:09.522Z") -DATE_TIMESTAMP("2014-05-07 14:19:09.522") -DATE_TIMESTAMP("2014-05-07 14:19:09.522Z") -DATE_TIMESTAMP(2014, 5, 7, 14, 19, 9, 522) -DATE_TIMESTAMP(1399472349522) -``` - -The same is true for calls to *DATE_ISO8601()* that also accepts variable input -formats: - -```js -DATE_ISO8601("2014-05-07T14:19:09.522Z") -DATE_ISO8601("2014-05-07 14:19:09.522Z") -DATE_ISO8601(2014, 5, 7, 14, 19, 9, 522) -DATE_ISO8601(1399472349522) -``` - -The above functions are all equivalent and will return *"2014-05-07T14:19:09.522Z"*. - -### DATE_ISO8601() - -`DATE_ISO8601(date) → dateString` - -Return an ISO 8601 date time string from *date*. -The date time string will always use UTC / Zulu time, indicated by the *Z* at its end. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time - -`DATE_ISO8601(year, month, day, hour, minute, second, millisecond) → dateString` - -Return a ISO 8601 date time string from *date*, but allows to specify the individual -date components separately. All parameters after *day* are optional. - -- **year** (number): typically in the range 0..9999, e.g. *2017* -- **month** (number): 1..12 for January through December -- **day** (number): 1..31 (upper bound depends on number of days in month) -- **hour** (number, *optional*): 0..23 -- **minute** (number, *optional*): 0..59 -- **second** (number, *optional*): 0..59 -- **milliseconds** (number, *optional*): 0..999 -- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time - -### DATE_TIMESTAMP() - -`DATE_TIMESTAMP(date) → timestamp` - -Create a timestamp value from *date*. The return value has millisecond precision. -To convert the return value to seconds, divide it by 1000. 
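-
-A small sketch, using the reference timestamp from the beginning of this page:
-
-```js
-DATE_TIMESTAMP("2014-05-07T14:19:09.522Z")        // 1399472349522 (milliseconds)
-DATE_TIMESTAMP("2014-05-07T14:19:09.522Z") / 1000 // 1399472349.522 (seconds)
-FLOOR(DATE_NOW() / 1000)                          // current unix time in whole seconds
-```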
- -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **timestamp** (number): numeric timestamp - -`DATE_TIMESTAMP(year, month, day, hour, minute, second, millisecond) → timestamp` - -Create a timestamp value, but allows to specify the individual date components -separately. All parameters after *day* are optional. - -- **year** (number): typically in the range 0..9999, e.g. *2017* -- **month** (number): 1..12 for January through December -- **day** (number): 1..31 (upper bound depends on number of days in month) -- **hour** (number, *optional*): 0..23 -- **minute** (number, *optional*): 0..59 -- **second** (number, *optional*): 0..59 -- **milliseconds** (number, *optional*): 0..999 -- returns **timestamp** (number): numeric timestamp - -Negative values are not allowed, result in *null* and cause a warning. -Values greater than the upper range bound overflow to the larger components -(e.g. an hour of 26 is automatically turned into an additional day and two hours): - -```js -DATE_TIMESTAMP(2016, 12, -1) // returns null and issues a warning -DATE_TIMESTAMP(2016, 2, 32) // returns 1456963200000, which is March 3rd, 2016 -DATE_TIMESTAMP(1970, 1, 1, 26) // returns 93600000, which is January 2nd, 1970, at 2 a.m. -``` - -### IS_DATESTRING() - -`IS_DATESTRING(value) → bool` - -Check if an arbitrary string is suitable for interpretation as date time string. - -- **value** (string): an arbitrary string -- returns **bool** (bool): *true* if *value* is a string that can be used - in a date function. This includes partial dates such as *2015* or *2015-10* and - strings containing invalid dates such as *2015-02-31*. The function will return - *false* for all non-string values, even if some of them may be usable in date - functions. - -Processing ----------- - -### DATE_DAYOFWEEK() - -`DATE_DAYOFWEEK(date) → weekdayNumber` - -Return the weekday number of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **weekdayNumber** (number): 0..6 as follows: - - 0 – Sunday - - 1 – Monday - - 2 – Tuesday - - 3 – Wednesday - - 4 – Thursday - - 5 – Friday - - 6 – Saturday - -### DATE_YEAR() - -`DATE_YEAR(date) → year` - -Return the year of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **year** (number): the year part of *date* as a number - -### DATE_MONTH() - -`DATE_MONTH(date) → month` - -Return the month of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **month** (number): the month part of *date* as a number - -### DATE_DAY() - -`DATE_DAY(date) → day` - -Return the day of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **day** (number): the day part of *date* as a number - -### DATE_HOUR() - -Return the hour of *date*. - -`DATE_HOUR(date) → hour` - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **hour** (number): the hour part of *date* as a number - -### DATE_MINUTE() - -`DATE_MINUTE(date) → minute` - -Return the minute of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **minute** (number): the minute part of *date* as a number - -### DATE_SECOND() - -`DATE_SECOND(date) → second` - -Return the second of *date*. 
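-
-Like the other component functions above, it accepts both numeric timestamps
-and date time strings. A small combined sketch for these component functions,
-again using the reference date of this page:
-
-```js
-DATE_DAYOFWEEK("2014-05-07")             // 3 (Wednesday)
-DATE_YEAR("2014-05-07T14:19:09.522Z")    // 2014
-DATE_MONTH("2014-05-07T14:19:09.522Z")   // 5
-DATE_DAY("2014-05-07T14:19:09.522Z")     // 7
-DATE_HOUR("2014-05-07T14:19:09.522Z")    // 14
-DATE_MINUTE("2014-05-07T14:19:09.522Z")  // 19
-DATE_SECOND("2014-05-07T14:19:09.522Z")  // 9
-DATE_SECOND(1399472349522)               // 9 (numeric timestamps work as well)
-```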
- -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **second** (number): the seconds part of *date* as a number - -### DATE_MILLISECOND() - -`DATE_MILLISECOND(date) → millisecond` - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **millisecond** (number): the milliseconds part of *date* as a number - -### DATE_DAYOFYEAR() - -`DATE_DAYOFYEAR(date) → dayOfYear` - -Return the day of year of *date*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **dayOfYear** (number): the day of year number of *date*. - The return values range from 1 to 365, or 366 in a leap year respectively. - -### DATE_ISOWEEK() - -`DATE_ISOWEEK(date) → weekDate` - -Return the week date of *date* according to ISO 8601. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **weekDate** (number): the ISO week date of *date*. The return values - range from 1 to 53. Monday is considered the first day of the week. There are no - fractional weeks, thus the last days in December may belong to the first week of - the next year, and the first days in January may be part of the previous year's - last week. - -### DATE_LEAPYEAR() - -`DATE_LEAPYEAR(date) → leapYear` - -Return whether *date* is in a leap year. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **leapYear** (bool): *true* if *date* is in a leap year, *false* otherwise - -### DATE_QUARTER() - -`DATE_QUARTER(date) → quarter` - -Return which quarter *date* belongs to. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **quarter** (number): the quarter of the given date (1-based): - - 1 – January, February, March - - 2 – April, May, June - - 3 – July, August, September - - 4 – October, November, December - -### DATE_DAYS_IN_MONTH() - -Return the number of days in the month of *date*. - -`DATE_DAYS_IN_MONTH(date) → daysInMonth` - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- returns **daysInMonth** (number): the number of days in *date*'s month (28..31) - -### DATE_TRUNC() - -`DATE_TRUNC(date, unit) → isoDate` - -Truncates the given date after *unit* and returns the modified date. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- **unit** (string): either of the following to specify the time unit (case-insensitive): - - y, year, years - - m, month, months - - d, day, days - - h, hour, hours - - i, minute, minutes - - s, second, seconds - - f, millisecond, milliseconds -- returns **isoDate** (string): the truncated ISO 8601 date time string - -```js -DATE_TRUNC('2017-02-03', 'month') // 2017-02-01T00:00:00.000Z -DATE_TRUNC('2017-02-03 04:05:06', 'hours') // 2017-02-03 04:00:00.000Z -``` - -### DATE_FORMAT() - -`DATE_FORMAT(date, format) → str` - -Format a date according to the given format string. - -- **date** (string|number): a date string or timestamp -- **format** (string): a format string, see below -- returns **str** (string): a formatted date string - -*format* supports the following placeholders (case-insensitive): - -- %t – timestamp, in milliseconds since midnight 1970-01-01 -- %z – ISO date (0000-00-00T00:00:00.000Z) -- %w – day of week (0..6) -- %y – year (0..9999) -- %yy – year (00..99), abbreviated (last two digits) -- %yyyy – year (0000..9999), padded to length of 4 -- %yyyyyy – year (-009999 .. 
+009999), with sign prefix and padded to length of 6 -- %m – month (1..12) -- %mm – month (01..12), padded to length of 2 -- %d – day (1..31) -- %dd – day (01..31), padded to length of 2 -- %h – hour (0..23) -- %hh – hour (00..23), padded to length of 2 -- %i – minute (0..59) -- %ii – minute (00..59), padded to length of 2 -- %s – second (0..59) -- %ss – second (00..59), padded to length of 2 -- %f – millisecond (0..999) -- %fff – millisecond (000..999), padded to length of 3 -- %x – day of year (1..366) -- %xxx – day of year (001..366), padded to length of 3 -- %k – ISO week date (1..53) -- %kk – ISO week date (01..53), padded to length of 2 -- %l – leap year (0 or 1) -- %q – quarter (1..4) -- %a – days in month (28..31) -- %mmm – abbreviated English name of month (Jan..Dec) -- %mmmm – English name of month (January..December) -- %www – abbreviated English name of weekday (Sun..Sat) -- %wwww – English name of weekday (Sunday..Saturday) -- %& – special escape sequence for rare occasions -- %% – literal % -- % – ignored - -`%yyyy` does not enforce a length of 4 for years before 0 and past 9999. -The same format as for `%yyyyyy` will be used instead. `%yy` preserves the -sign for negative years and may thus return 3 characters in total. - -Single `%` characters will be ignored. Use `%%` for a literal `%`. To resolve -ambiguities like in `%mmonth` (unpadded month number + the string "month") -between `%mm` + "onth" and `%m` + "month", use the escape sequence `%&`: -`%m%&month`. - -Note that *DATE_FORMAT()* is a rather costly operation and may not be suitable for large -datasets (like over 1 million dates). If possible, avoid formatting dates on -server-side and leave it up to the client to do so. This function should only -be used for special date comparisons or to store the formatted dates in the -database. For better performance, use the primitive `DATE_*()` functions -together with `CONCAT()` if possible. - -Examples: - -```js -DATE_FORMAT(DATE_NOW(), "%q/%yyyy") // quarter and year (e.g. "3/2015") -DATE_FORMAT(DATE_NOW(), "%dd.%mm.%yyyy %hh:%ii:%ss,%fff") // e.g. "18.09.2015 15:30:49,374" -DATE_FORMAT("1969", "Summer of '%yy") // "Summer of '69" -DATE_FORMAT("2016", "%%l = %l") // "%l = 1" (2016 is a leap year) -DATE_FORMAT("2016-03-01", "%xxx%") // "063", trailing % ignored -``` - -Comparison and calculation --------------------------- - -### DATE_ADD() - -`DATE_ADD(date, amount, unit) → isoDate` - -Add *amount* given in *unit* to *date* and return the calculated date. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- **amount** (number|string): number of *unit*s to add (positive value) or - subtract (negative value). It is recommended to use positive values only, - and use [DATE_SUBTRACT()](#datesubtract) for subtractions instead. 
-- **unit** (string): either of the following to specify the time unit to add or - subtract (case-insensitive): - - y, year, years - - m, month, months - - w, week, weeks - - d, day, days - - h, hour, hours - - i, minute, minutes - - s, second, seconds - - f, millisecond, milliseconds -- returns **isoDate** (string): the calculated ISO 8601 date time string - -```js -DATE_ADD(DATE_NOW(), -1, "day") // yesterday; also see DATE_SUBTRACT() -DATE_ADD(DATE_NOW(), 3, "months") // in three months -DATE_ADD(DATE_ADD("2015-04-01", 5, "years"), 1, "month") // May 1st 2020 -DATE_ADD("2015-04-01", 12*5 + 1, "months") // also May 1st 2020 -DATE_ADD(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), -4, "years") // Christmas four years ago -DATE_ADD(DATE_ADD("2016-02", "month", 1), -1, "day") // last day of February (29th, because 2016 is a leap year!) -``` - -`DATE_ADD(date, isoDuration) → isoDate` - -You may also pass an ISO duration string as *amount* and leave out *unit*. - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- **isoDuration** (string): an ISO 8601 duration string to add to *date*, see below -- returns **isoDate** (string): the calculated ISO 8601 date time string - -The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and -letters for time intervals - except for the separators `P` (period) and `T` (time). -The meaning of the other letters are: -- Y – years -- M – months (if before T) -- W – weeks -- D – days -- H – hours -- M – minutes (if after T) -- S – seconds (optionally with 3 decimal places for milliseconds) - -The string must be prefixed by a `P`. A separating `T` is only required if -`H`, `M` and/or `S` are specified. You only need to specify the needed pairs -of letters and numbers. - -```js -DATE_ADD(DATE_NOW(), "P1Y") // add 1 year -DATE_ADD(DATE_NOW(), "P3M2W") // add 3 months and 2 weeks -DATE_ADD(DATE_NOW(), "P5DT26H") // add 5 days and 26 hours (=6 days and 2 hours) -DATE_ADD("2000-01-01", "PT4H") // add 4 hours -DATE_ADD("2000-01-01", "PT30M44.4S" // add 30 minutes, 44 seconds and 400 ms -DATE_ADD("2000-01-01", "P1Y2M3W4DT5H6M7.89S" // add a bit of everything -``` - -### DATE_SUBTRACT() - -`DATE_SUBTRACT(date, amount, unit) → isoDate` - -Subtract *amount* given in *unit* from *date* and return the calculated date. - -It works the same as [DATE_ADD()](#dateadd), except that it subtracts. It is -equivalent to calling *DATE_ADD()* with a negative amount, except that -*DATE_SUBTRACT()* can also subtract ISO durations. Note that negative ISO -durations are not supported (i.e. starting with `-P`, like `-P1Y`). - -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- **amount** (number|string): number of *unit*s to subtract (positive value) or - add (negative value). It is recommended to use positive values only, - and use [DATE_ADD()](#dateadd) for additions instead. -- **unit** (string): either of the following to specify the time unit to add or - subtract (case-insensitive): - - y, year, years - - m, month, months - - w, week, weeks - - d, day, days - - h, hour, hours - - i, minute, minutes - - s, second, seconds - - f, millisecond, milliseconds -- returns **isoDate** (string): the calculated ISO 8601 date time string - -`DATE_SUBTRACT(date, isoDuration) → isoDate` - -You may also pass an ISO duration string as *amount* and leave out *unit*. 
- -- **date** (number|string): numeric timestamp or ISO 8601 date time string -- **isoDuration** (string): an ISO 8601 duration string to subtract from *date*, - see below -- returns **isoDate** (string): the calculated ISO 8601 date time string - -The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and -letters for time intervals - except for the separators `P` (period) and `T` (time). -The meaning of the other letters are: -- Y – years -- M – months (if before T) -- W – weeks -- D – days -- H – hours -- M – minutes (if after T) -- S – seconds (optionally with 3 decimal places for milliseconds) - -The string must be prefixed by a `P`. A separating `T` is only required if -`H`, `M` and/or `S` are specified. You only need to specify the needed pairs -of letters and numbers. - -```js -DATE_SUBTRACT(DATE_NOW(), 1, "day") // yesterday -DATE_SUBTRACT(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), 4, "years") // Christmas four years ago -DATE_SUBTRACT(DATE_ADD("2016-02", "month", 1), 1, "day") // last day of February (29th, because 2016 is a leap year!) -DATE_SUBTRACT(DATE_NOW(), "P4D") // four days ago -DATE_SUBTRACT(DATE_NOW(), "PT1H3M") // 1 hour and 30 minutes ago -``` - -### DATE_DIFF() - -`DATE_DIFF(date1, date2, unit, asFloat) → diff` - -Calculate the difference between two dates in given time *unit*, optionally -with decimal places. - -- **date1** (number|string): numeric timestamp or ISO 8601 date time string -- **date2** (number|string): numeric timestamp or ISO 8601 date time string -- **unit** (string): either of the following to specify the time unit to return the - difference in (case-insensitive): - - y, year, years - - m, month, months - - w, week, weeks - - d, day, days - - h, hour, hours - - i, minute, minutes - - s, second, seconds - - f, millisecond, milliseconds -- **asFloat** (boolean, *optional*): if set to *true*, decimal places will be - preserved in the result. The default is *false* and an integer is returned. -- returns **diff** (number): the calculated difference as number in *unit*. - The value will be negative if *date2* is before *date1*. - -### DATE_COMPARE() - -`DATE_COMPARE(date1, date2, unitRangeStart, unitRangeEnd) → bool` - -Check if two partial dates match. - -- **date1** (number|string): numeric timestamp or ISO 8601 date time string -- **date2** (number|string): numeric timestamp or ISO 8601 date time string -- **unitRangeStart** (string): unit to start from, see below -- **unitRangeEnd** (string, *optional*): unit to end with, leave out to only - compare the component as specified by *unitRangeStart*. An error is raised if - *unitRangeEnd* is a unit before *unitRangeStart*. -- returns **bool** (bool): *true* if the dates match, *false* otherwise - -The parts to compare are defined by a range of time units. The full range is: -years, months, days, hours, minutes, seconds, milliseconds (in this order). - -All components of *date1* and *date2* as specified by the range will be compared. -You can refer to the units as: - -- y, year, years -- m, month, months -- d, day, days -- h, hour, hours -- i, minute, minutes -- s, second, seconds -- f, millisecond, milliseconds - -```js -// Compare months and days, true on birthdays if you're born on 4th of April -DATE_COMPARE("1985-04-04", DATE_NOW(), "months", "days") - -// Will only match on one day if the current year is a leap year! -// You may want to add or subtract one day from date1 to match every year. 
-DATE_COMPARE("1984-02-29", DATE_NOW(), "months", "days") - -// compare years, months and days (true, because it's the same day) -DATE_COMPARE("2001-01-01T15:30:45.678Z", "2001-01-01T08:08:08.008Z", "years", "days") -``` - -You can directly compare ISO date **strings** if you want to find dates before or -after a certain date, or in between two dates (`>=`, `>`, `<`, `<=`). -No special date function is required. Equality tests (`==` and `!=`) will only -match the exact same date and time however. You may use `SUBSTRING()` to -compare partial date strings, `DATE_COMPARE()` is basically a convenience -function for that. However, neither is really required to limit a search to a -certain day as demonstrated here: - -```js -FOR doc IN coll - FILTER doc.date >= "2015-05-15" AND doc.date < "2015-05-16" - RETURN doc -``` - -Every ISO date on that day is greater than or equal to `2015-05-15` in a string -comparison (e.g. `2015-05-15T11:30:00.000Z`). Dates before `2015-05-15` are smaller -and therefore filtered out by the first condition. Every date past `2015-05-15` is -greater than this date in a string comparison, and therefore filtered out by the -second condition. The result is that the time components in the dates you compare -with are "ignored". The query will return every document with *date* ranging from -`2015-05-15T00:00:00.000Z` to `2015-05-15T23:99:99.999Z`. It would also include -`2015-05-15T24:00:00.000Z`, but that date is actually `2015-05-16T00:00:00.000Z` -and can only occur if inserted manually (you may want to pass dates through -[DATE_ISO8601()](#dateiso8601) to ensure a correct date representation). - -Leap days in leap years (29th of February) must be always handled manually, -if you require so (e.g. birthday checks): - -```js -LET today = DATE_NOW() -LET noLeapYear = NOT DATE_LEAPYEAR(today) - -FOR user IN users - LET birthday = noLeapYear AND - DATE_MONTH(user.birthday) == 2 AND - DATE_DAY(user.birthday) == 29 - ? DATE_SUBTRACT(user.birthday, 1, "day") /* treat like 28th in non-leap years */ - : user.birthday - FILTER DATE_COMPARE(today, birthday, "month", "day") - /* includes leaplings on the 28th of February in non-leap years, - * but excludes them in leap years which do have a 29th February. - * Replace DATE_SUBTRACT() by DATE_ADD() to include them on the 1st of March - * in non-leap years instead (depends on local jurisdiction). - */ - RETURN user -``` - -Working with dates and indices ------------------------------- - -There are two recommended ways to store timestamps in ArangoDB: - - string: UTC timestamp with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) - - number: [unix timestamp](https://en.wikipedia.org/wiki/Unix_time) with millisecond precision - -The sort order of both is identical due to the sort properties of ISO date strings. -You can't mix both types, numbers and strings, in a single attribute however. - -You can use [skiplist indices](../../Manual/Indexing/Skiplist.html) with both date types. -When chosing string representations, you can work with string comparisons (less than, -greater than etc.) 
to express time ranges in your queries while still utilizing -skiplist indices: - - @startDocuBlockInline working_with_date_time - @EXAMPLE_ARANGOSH_OUTPUT{working_with_date_time} - db._create("exampleTime"); - var timestamps = ["2014-05-07T14:19:09.522","2014-05-07T21:19:09.522","2014-05-08T04:19:09.522","2014-05-08T11:19:09.522","2014-05-08T18:19:09.522"]; - for (i = 0; i < 5; i++) db.exampleTime.save({value:i, ts: timestamps[i]}) - db._query("FOR d IN exampleTime FILTER d.ts > '2014-05-07T14:19:09.522' and d.ts < '2014-05-08T18:19:09.522' RETURN d").toArray() - ~addIgnoreCollection("example") - ~db._drop("exampleTime") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock working_with_date_time - -The first and the last timestamp in the array are excluded from the result by the `FILTER`. - -Limitations ------------ - -Note that dates before the year 1583 aren't allowed by the -[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) standard by default, because -they lie before the official introduction of the Gregorian calendar and may thus -be incorrect or invalid. All AQL date functions apply the same rules to every -date according to the Gregorian calendar system, even if inappropriate. That -does not constitute a problem, unless you deal with dates prior to 1583 and -especially years before Christ. The standard allows negative years, but requires -special treatment of positive years too, if negative years are used (e.g. -`+002015-05-15` and `-000753-01-01`). This is rarely used however, and AQL does -not use the 7-character version for years between 0 and 9999 in ISO strings. -Keep in mind that they can't be properly compared to dates outside that range. -Sorting of negative dates does not result in a meaningful order, with years longer -ago last, but months, days and the time components in otherwise correct order. - -Leap seconds are ignored, just as they are in JavaScript as per -[ECMAScript Language Specifications](http://www.ecma-international.org/ecma-262/5.1/#sec-15.9.1.1). diff --git a/Documentation/Books/AQL/Functions/Document.md b/Documentation/Books/AQL/Functions/Document.md deleted file mode 100644 index 6906a45cfabc..000000000000 --- a/Documentation/Books/AQL/Functions/Document.md +++ /dev/null @@ -1,465 +0,0 @@ -Document functions -================== - -AQL provides below listed functions to operate on objects / document values. -Also see [object access](../Fundamentals/DataTypes.md#objects--documents) for -additional language constructs. - -ATTRIBUTES() ------------- - -`ATTRIBUTES(document, removeInternal, sort) → strArray` - -Return the top-level attribute keys of the *document* as an array. -Optionally omit system attributes and sort the array. - -- **document** (object): an arbitrary document / object -- **removeInternal** (bool, *optional*): whether all system attributes (*_key*, *_id* etc., - every attribute key that starts with an underscore) shall be omitted in the result. - The default is *false*. -- **sort** (bool, *optional*): optionally sort the resulting array alphabetically. - The default is *false* and will return the attribute names in any order. 
-- returns **strArray** (array): the attribute keys of the input *document* as an - array of strings - -```js -ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" } ) -// [ "foo", "_key", "_custom" ] - -ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" }, true ) -// [ "foo" ] - -ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" }, false, true ) -// [ "_custom", "_key", "foo" ] -``` - -Complex example to count how often every attribute key occurs in the documents -of *collection* (expensive on large collections): - -```js -LET attributesPerDocument = ( - FOR doc IN collection RETURN ATTRIBUTES(doc, true) -) -FOR attributeArray IN attributesPerDocument - FOR attribute IN attributeArray - COLLECT attr = attribute WITH COUNT INTO count - SORT count DESC - RETURN {attr, count} -``` - -COUNT() -------- - -This is an alias for [LENGTH()](#length). - -HAS() ------ - -`HAS(document, attributeName) → isPresent` - -Test whether an attribute is present in the provided document. - -- **document** (object): an arbitrary document / object -- **attributeName** (string): the attribute key to test for -- returns **isPresent** (bool): *true* if *document* has an attribute named - *attributeName*, and *false* otherwise. An attribute with a falsy value (*0*, *false*, - empty string *""*) or *null* is also considered as present and returns *true*. - -```js -HAS( { name: "Jane" }, "name" ) // true -HAS( { name: "Jane" }, "age" ) // false -HAS( { name: null }, "name" ) // true -``` - -Note that the function checks if the specified attribute exists. This is different -from similar ways to test for the existance of an attribute, in case the attribute -has a falsy value or is not present (implicitly *null* on object access): - -```js -!!{ name: "" }.name // false -HAS( { name: "" }, "name") // true - -{ name: null }.name == null // true -{ }.name == null // true -HAS( { name: null }, "name" ) // true -HAS( { }, "name" ) // false -``` - -Note that `HAS()` can not utilize indexes. If it's not necessary to distinguish -between explicit and implicit *null* values in your query, you may use an equality -comparison to test for *null* and create a non-sparse index on the attribute you -want to test against: - -```js -FILTER !HAS(doc, "name") // can not use indexes -FILTER IS_NULL(doc, "name") // can not use indexes -FILTER doc.name == null // can utilize non-sparse indexes -``` - -IS_SAME_COLLECTION() --------------------- - -`IS_SAME_COLLECTION(collectionName, documentHandle) → bool` - - collection id as the collection specified in *collection*. *document* can either be - a [document handle](../../Manual/Appendix/Glossary.html#document-handle) string, or a document with - an *_id* attribute. The function does not validate whether the collection actually - contains the specified document, but only compares the name of the specified collection - with the collection name part of the specified document. - If *document* is neither an object with an *id* attribute nor a *string* value, - the function will return *null* and raise a warning. - -- **collectionName** (string): the name of a collection as string -- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*) - or a regular document from a collection. Passing either a non-string or a non-document - or a document without an *_id* attribute will result in an error. 
-- returns **bool** (bool): return *true* if the collection of *documentHandle* is the same - as *collectionName*, otherwise *false* - -```js -// true -IS_SAME_COLLECTION( "_users", "_users/my-user" ) -IS_SAME_COLLECTION( "_users", { _id: "_users/my-user" } ) - -// false -IS_SAME_COLLECTION( "_users", "foobar/baz") -IS_SAME_COLLECTION( "_users", { _id: "something/else" } ) -``` - -KEEP() ------- - -`KEEP(document, attributeName1, attributeName2, ... attributeNameN) → doc` - -Keep only the attributes *attributeName* to *attributeNameN* of *document*. -All other attributes will be removed from the result. - -To do the opposite, see [UNSET()](#unset). - -- **document** (object): a document / object -- **attributeNames** (string, *repeatable*): an arbitrary number of attribute - names as multiple arguments -- returns **doc** (object): a document with only the specified attributes on - the top-level - -```js -KEEP(doc, "firstname", "name", "likes") -``` - -`KEEP(document, attributeNameArray) → doc` - -- **document** (object): a document / object -- **attributeNameArray** (array): an array of attribute names as strings -- returns **doc** (object): a document with only the specified attributes on - the top-level - -```js -KEEP(doc, [ "firstname", "name", "likes" ]) -``` - -LENGTH() --------- - -`LENGTH(doc) → attrCount` - -Determine the number of attribute keys of an object / document. - -- **doc** (object): a document / object -- returns **attrCount** (number): the number of attribute keys in *doc*, regardless - of their values - -*LENGTH()* can also determine the [number of elements](Array.md#length) in an array, -the [amount of documents](Miscellaneous.md#length) in a collection and -the [character length](String.md#length) of a string. - -MATCHES() ---------- - -`MATCHES(document, examples, returnIndex) → match` - -Compare the given *document* against each example document provided. The comparisons -will be started with the first example. All attributes of the example will be compared -against the attributes of *document*. If all attributes match, the comparison stops -and the result is returned. If there is a mismatch, the function will continue the -comparison with the next example until there are no more examples left. - -The *examples* can be an array of 1..n example documents or a single document, -with any number of attributes each. - -Note that *MATCHES()* can not utilize indexes. - -- **document** (object): document to determine whether it matches any example -- **examples** (object|array): a single document, or an array of documents to compare - against. Specifying an empty array is not allowed. -- **returnIndex** (bool): by setting this flag to *true*, the index of the example that - matched will be returned (starting at offset 0), or *-1* if there was no match. - The default is *false* and makes the function return a boolean. -- returns **match** (bool|number): if *document* matches one of the examples, *true* is - returned, otherwise *false*. A number is returned instead if *returnIndex* is used. - -```js -LET doc = { - name: "jane", - age: 27, - active: true -} -RETURN MATCHES(doc, { age: 27, active: true } ) -``` - -This will return *true*, because all attributes of the example are present in the document. - -```js -RETURN MATCHES( - { "test": 1 }, - [ - { "test": 1, "foo": "bar" }, - { "foo": 1 }, - { "test": 1 } - ], true) -``` - -This will return *2*, because the third example matches, and because the -*returnIndex* flag is set to *true*. 
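-
-Because *MATCHES()* returns a boolean by default, it can also be used directly
-as a *FILTER* condition, for example to select all documents that match at
-least one of several example documents (a sketch; the *users* collection is
-hypothetical):
-
-```js
-FOR u IN users  // hypothetical collection
-  FILTER MATCHES(u, [ { active: true }, { age: 27 } ])
-  RETURN u
-```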
- -MERGE() -------- - -`MERGE(document1, document2, ... documentN) → mergedDocument` - -Merge the documents *document1* to *documentN* into a single document. -If document attribute keys are ambiguous, the merged result will contain the values -of the documents contained later in the argument list. - - -- **documents** (object, *repeatable*): an arbitrary number of documents as - multiple arguments (at least 2) -- returns **mergedDocument** (object): a combined document - -Note that merging will only be done for top-level attributes. If you wish to -merge sub-attributes, use [MERGE_RECURSIVE()](#mergerecursive) instead. - -Two documents with distinct attribute names can easily be merged into one: - -```js -MERGE( - { "user1": { "name": "Jane" } }, - { "user2": { "name": "Tom" } } -) -// { "user1": { "name": "Jane" }, "user2": { "name": "Tom" } } -``` - -When merging documents with identical attribute names, the attribute values of the -latter documents will be used in the end result: - -```js -MERGE( - { "users": { "name": "Jane" } }, - { "users": { "name": "Tom" } } -) -// { "users": { "name": "Tom" } } -``` - -`MERGE(docArray) → mergedDocument` - -*MERGE* works with a single array parameter, too. This variant allows combining the -attributes of multiple objects in an array into a single object. - -- **docArray** (array): an array of documents, as sole argument -- returns **mergedDocument** (object): a combined document - -```js -MERGE( - [ - { foo: "bar" }, - { quux: "quetzalcoatl", ruled: true }, - { bar: "baz", foo: "done" } - ] -) -``` - -This will now return: - -```js -{ - "foo": "done", - "quux": "quetzalcoatl", - "ruled": true, - "bar": "baz" -} -``` - -MERGE_RECURSIVE() ------------------ - -`MERGE_RECURSIVE(document1, document2, ... documentN) → mergedDocument` - -Recursively merge the documents *document1* to *documentN* into a single document. -If document attribute keys are ambiguous, the merged result will contain the values -of the documents contained later in the argument list. - -- **documents** (object, *repeatable*): an arbitrary number of documents as - multiple arguments (at least 2) -- returns **mergedDocument** (object): a combined document - -For example, two documents with distinct attribute names can easily be merged into one: - -```js -MERGE_RECURSIVE( - { "user-1": { "name": "Jane", "livesIn": { "city": "LA" } } }, - { "user-1": { "age": 42, "livesIn": { "state": "CA" } } } -) -// { "user-1": { "name": "Jane", "livesIn": { "city": "LA", "state": "CA" }, "age": 42 } } -``` - -*MERGE_RECURSIVE()* does not support the single array parameter variant that *MERGE* offers. - -PARSE_IDENTIFIER() ------------------- - -`PARSE_IDENTIFIER(documentHandle) → parts` - -Parse a [document handle](../../Manual/Appendix/Glossary.html#document-handle) and return its -individual parts as separate attributes. - -This function can be used to easily determine the -[collection name](../../Manual/Appendix/Glossary.html#collection-name) and key of a given document. - -- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*) - or a regular document from a collection. Passing either a non-string or a non-document - or a document without an *_id* attribute will result in an error. 
-- returns **parts** (object): an object with the attributes *collection* and *key* - -```js -PARSE_IDENTIFIER("_users/my-user") -// { "collection": "_users", "key": "my-user" } - -PARSE_IDENTIFIER( { "_id": "mycollection/mykey", "value": "some value" } ) -// { "collection": "mycollection", "key": "mykey" } -``` - -TRANSLATE() ------------ - -`TRANSLATE(value, lookupDocument, defaultValue) → mappedValue` - -Look up the specified *value* in the *lookupDocument*. If *value* is a key in -*lookupDocument*, then *value* will be replaced with the lookup value found. -If *value* is not present in *lookupDocument*, then *defaultValue* will be returned -if specified. If no *defaultValue* is specified, *value* will be returned unchanged. - -- **value** (string): the value to encode according to the mapping -- **lookupDocument** (object): a key/value mapping as document -- **defaultValue** (any, *optional*): a fallback value in case *value* is not found -- returns **mappedValue** (any): the encoded value, or the unaltered *value* or *defaultValue* - (if supplied) in case it couldn't be mapped - -```js -TRANSLATE("FR", { US: "United States", UK: "United Kingdom", FR: "France" } ) -// "France" - -TRANSLATE(42, { foo: "bar", bar: "baz" } ) -// 42 - -TRANSLATE(42, { foo: "bar", bar: "baz" }, "not found!") -// "not found!" -``` - -UNSET() -------- - -`UNSET(document, attributeName1, attributeName2, ... attributeNameN) → doc` - -Remove the attributes *attributeName1* to *attributeNameN* from *document*. -All other attributes will be preserved. - -To do the opposite, see [KEEP()](#keep). - -- **document** (object): a document / object -- **attributeNames** (string, *repeatable*): an arbitrary number of attribute - names as multiple arguments (at least 1) -- returns **doc** (object): *document* without the specified attributes on the - top-level - -```js -UNSET( doc, "_id", "_key", "foo", "bar" ) -``` - -`UNSET(document, attributeNameArray) → doc` - -- **document** (object): a document / object -- **attributeNameArray** (array): an array of attribute names as strings -- returns **doc** (object): *document* without the specified attributes on the - top-level - -```js -UNSET( doc, [ "_id", "_key", "foo", "bar" ] ) -``` - -UNSET_RECURSIVE() ------------------ - -`UNSET_RECURSIVE(document, attributeName1, attributeName2, ... attributeNameN) → doc` - -Recursively remove the attributes *attributeName1* to *attributeNameN* from -*document* and its sub-documents. All other attributes will be preserved. - -- **document** (object): a document / object -- **attributeNames** (string, *repeatable*): an arbitrary number of attribute - names as multiple arguments (at least 1) -- returns **doc** (object): *document* without the specified attributes on - all levels (top-level as well as nested objects) - -```js -UNSET_RECURSIVE( doc, "_id", "_key", "foo", "bar" ) -``` - -`UNSET_RECURSIVE(document, attributeNameArray) → doc` - -- **document** (object): a document / object -- **attributeNameArray** (array): an array of attribute names as strings -- returns **doc** (object): *document* without the specified attributes on - all levels (top-level as well as nested objects) - -```js -UNSET_RECURSIVE( doc, [ "_id", "_key", "foo", "bar" ] ) -``` - -VALUES() --------- - -`VALUES(document, removeInternal) → anyArray` - -Return the attribute values of the *document* as an array. Optionally omit -system attributes. 
- -- **document** (object): a document / object -- **removeInternal** (bool, *optional*): if set to *true*, then all internal attributes - (such as *_id*, *_key* etc.) are removed from the result -- returns **anyArray** (array): the values of *document* returned in any order - -```js -VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 } ) -// [ "Jane", 35, "users/jane" ] - -VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 }, true ) -// [ "Jane", 35 ] -``` - -ZIP() ------ - -`ZIP(keys, values) → doc` - -Return a document object assembled from the separate parameters *keys* and *values*. - -*keys* and *values* must be arrays and have the same length. - -- **keys** (array): an array of strings, to be used as attribute names in the result -- **values** (array): an array with elements of arbitrary types, to be used as - attribute values -- returns **doc** (object): a document with the keys and values assembled - -```js -ZIP( [ "name", "active", "hobbies" ], [ "some user", true, [ "swimming", "riding" ] ] ) -// { "name": "some user", "active": true, "hobbies": [ "swimming", "riding" ] } -``` diff --git a/Documentation/Books/AQL/Functions/Fulltext.md b/Documentation/Books/AQL/Functions/Fulltext.md deleted file mode 100644 index 764b6b6db4fd..000000000000 --- a/Documentation/Books/AQL/Functions/Fulltext.md +++ /dev/null @@ -1,76 +0,0 @@ -Fulltext functions -================== - -AQL offers the following functions to filter data based on -[fulltext indexes](../../Manual/Indexing/Fulltext.html). - -FULLTEXT() ----------- - -`FULLTEXT(coll, attribute, query, limit) → docArray` - -Return all documents from collection *coll*, for which the attribute *attribute* -matches the fulltext search phrase *query*, optionally capped to *limit* results. - -**Note**: the *FULLTEXT()* function requires the collection *coll* to have a -fulltext index on *attribute*. If no fulltext index is available, this function -will fail with an error at runtime. It doesn't fail when explaining the query however. - -- **coll** (collection): a collection -- **attribute** (string): the attribute name of the attribute to search in -- **query** (string): a fulltext search expression as described below -- **limit** (number, *optional*): if set to a non-zero value, it will cap the result - to at most this number of documents -- returns **docArray** (array): an array of documents - -*FULLTEXT()* is not meant to be used as an argument to *FILTER*, -but rather to be used as the expression of a *FOR* statement: - -```js -FOR oneMail IN FULLTEXT(emails, "body", "banana,-apple") - RETURN oneMail._id -``` - -*query* is a comma-separated list of sought words (or prefixes of sought words). To -distinguish between prefix searches and complete-match searches, each word can optionally be -prefixed with either the *prefix:* or *complete:* qualifier. Different qualifiers can -be mixed in the same query. Not specifying a qualifier for a search word will implicitly -execute a complete-match search for the given word: - -- *FULLTEXT(emails, "body", "banana")* Will look for the word *banana* in the - attribute *body* of the collection *collection*. - -- *FULLTEXT(emails, "body", "banana,orange")* Will look for both words - *banana* and *orange* in the mentioned attribute. Only those documents will be - returned that contain both words. - -- *FULLTEXT(emails, "body", "prefix:head")* Will look for documents that contain any - words starting with the prefix *head*. 
- -- *FULLTEXT(emails, "body", "prefix:head,complete:aspirin")* Will look for all - documents that contain a word starting with the prefix *head* and that also contain - the (complete) word *aspirin*. Note: specifying *complete* is optional here. - -- *FULLTEXT(emails, "body", "prefix:cent,prefix:subst")* Will look for all documents - that contain a word starting with the prefix *cent* and that also contain a word - starting with the prefix *subst*. - -If multiple search words (or prefixes) are given, then by default the results will be -AND-combined, meaning only the logical intersection of all searches will be returned. -It is also possible to combine partial results with a logical OR, and with a logical NOT: - -- *FULLTEXT(emails, "body", "+this,+text,+document")* Will return all documents that - contain all the mentioned words. Note: specifying the *+* symbols is optional here. - -- *FULLTEXT(emails, "body", "banana,|apple")* Will return all documents that contain - either (or both) words *banana* or *apple*. - -- *FULLTEXT(emails, "body", "banana,-apple")* Will return all documents that contain - the word *banana*, but do not contain the word *apple*. - -- *FULLTEXT(emails, "body", "banana,pear,-cranberry")* Will return all documents that - contain both the words *banana* and *pear*, but do not contain the word - *cranberry*. - -No precedence of logical operators will be honored in a fulltext query. The query will simply -be evaluated from left to right. diff --git a/Documentation/Books/AQL/Functions/Geo.md b/Documentation/Books/AQL/Functions/Geo.md deleted file mode 100644 index 3b2349522049..000000000000 --- a/Documentation/Books/AQL/Functions/Geo.md +++ /dev/null @@ -1,440 +0,0 @@ -Geo functions -============= - -Geo utility functions ---------------------- - -The following helper functions **can** use geo indexes, but do not have to in -all cases. You can use all of these functions in combination with each other, -and if you have configured a geo index it may be utilized, -see [Geo Indexing](../../Manual/Indexing/Geo.html). - -### DISTANCE() - -`DISTANCE(latitude1, longitude1, latitude2, longitude2) → distance` - -Calculate the distance between two arbitrary coordinates in meters (as birds -would fly). The value is computed using the haversine formula, which is based -on a spherical Earth model. It's fast to compute and is accurate to around 0.3%, -which is sufficient for most use cases such as location-aware services. - -- **latitude1** (number): the latitude portion of the first coordinate -- **longitude1** (number): the longitude portion of the first coordinate -- **latitude2** (number): the latitude portion of the second coordinate -- **longitude2** (number): the longitude portion of the second coordinate -- returns **distance** (number): the distance between both coordinates in **meters** - -```js -// Distance from Brandenburg Gate (Berlin) to ArangoDB headquarters (Cologne) -DISTANCE(52.5163, 13.3777, 50.9322, 6.94) // 476918.89688380965 (~477km) - -// Sort a small number of documents based on distance to Central Park (New York) -FOR doc IN doc // e.g. documents returned by a traversal - SORT DISTANCE(doc.latitude, doc.longitude, 40.78, -73.97) - RETURN doc -``` - -### GEO_CONTAINS() - -Introduced in: v3.4.0 - -`GEO_CONTAINS(geoJsonA, geoJsonB) → bool` - -Checks whether the [GeoJSON object](../../Manual/Indexing/Geo.html#geojson) `geoJsonA` -fully contains `geoJsonB` (Every point in B is also in A). 
The object `geoJsonA` has to be of type -`Polygon` or `MultiPolygon`, other types are not supported because containment is ill defined. -This function can be **optimized** by a S2 based [geospatial index](../../Manual/Indexing/Geo.html). - -- **geoJsonA** (object): first GeoJSON object or coordinate array (in longitude, latitude order) -- **geoJsonB** (object): second GeoJSON object or coordinate array (in longitude, latitude order) -- returns **bool** (bool): true when every point in B is also contained in A, false otherwise - - -### GEO_DISTANCE() - -Introduced in: v3.4.0 - -`GEO_DISTANCE(geoJsonA, geoJsonB) → distance` - -Return the distance between two GeoJSON objects, measured from the **centroid** -of each shape. For a list of supported types see the -[geo index page](../../Manual/Indexing/Geo.html#geojson). - -- **geoJsonA** (object): first GeoJSON object -- **geoJsonB** (object): second GeoJSON object -- returns **distance** (number): the distance between the centroid points of - the two objects - -```js -LET polygon = { - type: "Polygon", - coordinates: [[[-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]]] -} -FOR doc IN collectionName - LET distance = GEO_DISTANCE(doc.geometry, polygon) // calculates the distance - RETURN distance -``` - -### GEO_EQUALS() - -Introduced in: v3.4.0 - -`GEO_EQUALS(geoJsonA, geoJsonB) → bool` - -Checks whether two GeoJSON objects are equal or not. For a list of supported -types see the [geo index page](../../Manual/Indexing/Geo.html#geojson). - -- **geoJsonA** (object): first GeoJSON object -- **geoJsonB** (object): second GeoJSON object. -- returns **bool** (bool): true for equality. - -```js -LET polygonA = GEO_POLYGON([ - [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5] -]) -LET polygonB = GEO_POLYGON([ - [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5] -]) -RETURN GEO_EQUALS(polygonA, polygonB) // true -``` - -```js -LET polygonA = GEO_POLYGON([ - [-11.1, 24.0], [-10.5, 26.1], [-11.2, 27.1], [-11.1, 24.0] -]) -LET polygonB = GEO_POLYGON([ - [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5] -]) -RETURN GEO_EQUALS(polygonA, polygonB) // false -``` - -### GEO_INTERSECTS() - -Introduced in: v3.4.0 - -`GEO_INTERSECTS(geoJsonA, geoJsonB) → bool` - -Checks whether the [GeoJSON object](../../Manual/Indexing/Geo.html#geojson) `geoJsonA` -intersects with `geoJsonB` (i.e. at least one point in B is also A or vice-versa). -This function can be **optimized** by a S2 based [geospatial index](../../Manual/Indexing/Geo.html). - -- **geoJsonA** (object): first GeoJSON object -- **geoJsonB** (object): second GeoJSON object. -- returns **bool** (bool): true if B intersects A, false otherwise - -### IS_IN_POLYGON() - -Determine whether a coordinate is inside a polygon. - -{% hint 'warning' %} -The *IS_IN_POLYGON* AQL function is **deprecated** as of ArangoDB 3.4.0 in -favor of the new `GEO_CONTAINS` AQL function, which works with -[GeoJSON](https://tools.ietf.org/html/rfc7946) Polygons and MultiPolygons. -{% endhint %} - -`IS_IN_POLYGON(polygon, latitude, longitude) → bool` - -- **polygon** (array): an array of arrays with 2 elements each, representing the - points of the polygon in the format *[lat, lon]* -- **latitude** (number): the latitude portion of the search coordinate -- **longitude** (number): the longitude portion of the search coordinate -- returns **bool** (bool): *true* if the point (*latitude*, *longitude*) is - inside the *polygon* or *false* if it's not. 
The result is undefined (can be - *true* or *false*) if the specified point is exactly on a boundary of the - polygon. - -```js -// will check if the point (lat 4, lon 7) is contained inside the polygon -IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], 4, 7 ) -``` - -`IS_IN_POLYGON(polygon, coord, useLonLat) → bool` - -The 2nd parameter can alternatively be specified as an array with two values. - -By default, each array element in *polygon* is expected to be in the format -*[lat, lon]*. This can be changed by setting the 3rd parameter to *true* to -interpret the points as *[lon, lat]*. *coord* will then also be interpreted in -the same way. - -- **polygon** (array): an array of arrays with 2 elements each, representing the - points of the polygon -- **coord** (array): the search coordinate as a number array with two elements -- **useLonLat** (bool, *optional*): if set to *true*, the coordinates in - *polygon* and the search coordinate *coord* will be interpreted as - *[lon, lat]* (GeoJSON). The default is *false* and the format *[lat, lon]* is - expected. -- returns **bool** (bool): *true* if the point *coord* is inside the *polygon* - or *false* if it's not. The result is undefined (can be *true* or *false*) if - the specified point is exactly on a boundary of the polygon. - -```js -// will check if the point (lat 4, lon 7) is contained inside the polygon -IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], [ 4, 7 ] ) - -// will check if the point (lat 4, lon 7) is contained inside the polygon -IS_IN_POLYGON( [ [ 0, 0 ], [ 10, 0 ], [ 10, 10 ], [ 0, 10 ] ], [ 7, 4 ], true ) -``` - -GeoJSON Constructors ---------------------- - -Introduced in: v3.4.0 - -The following helper functions are available to easily create valid GeoJSON -output. In all cases you can write equivalent JSON yourself, but these functions -will help you to make all your AQL queries shorter and easier to read. - -### GEO_LINESTRING() - -`GEO_LINESTRING(points) → geoJson` - -Construct a GeoJSON LineString. -Needs at least two longitude/latitude pairs. - -- **points** (array): number array of longitude/latitude pairs -- returns **geoJson** (object): a valid GeoJSON LineString - -@startDocuBlockInline aqlGeoLineString_1 -@EXAMPLE_AQL{aqlGeoLineString_1} -RETURN GEO_LINESTRING([ - [35, 10], [45, 45] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoLineString_1 - -### GEO_MULTILINESTRING() - -`GEO_MULTILINESTRING(points) → geoJson` - -Construct a GeoJSON MultiLineString. -Needs at least two elements consisting valid LineStrings coordinate arrays. - -- **points** (array): array of LineStrings -- returns **geoJson** (object): a valid GeoJSON MultiLineString - -@startDocuBlockInline aqlGeoMultiLineString_1 -@EXAMPLE_AQL{aqlGeoMultiLineString_1} -RETURN GEO_MULTILINESTRING([ - [[100.0, 0.0], [101.0, 1.0]], - [[102.0, 2.0], [101.0, 2.3]] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoMultiLineString_1 - -### GEO_MULTIPOINT() - -`GEO_MULTIPOINT(points) → geoJson` - -Construct a GeoJSON LineString. Needs at least two longitude/latitude pairs. - -- **points** (array): number array of longitude/latitude pairs -- returns **geoJson** (object): a valid GeoJSON Point - -@startDocuBlockInline aqlGeoMultiPoint_1 -@EXAMPLE_AQL{aqlGeoMultiPoint_1} -RETURN GEO_MULTIPOINT([ - [35, 10], [45, 45] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoMultiPoint_1 - -### GEO_POINT() - -`GEO_POINT(longitude, latitude) → geoJson` - -Construct a valid GeoJSON Point. 
- -- **longitude** (number): the longitude portion of the point -- **latitude** (number): the latitude portion of the point -- returns **geoJson** (object): a GeoJSON Point - -@startDocuBlockInline aqlGeoPoint_1 -@EXAMPLE_AQL{aqlGeoPoint_1} -RETURN GEO_POINT(1.0, 2.0) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoPoint_1 - -### GEO_POLYGON() - -`GEO_POLYGON(points) → geoJson` - -Construct a GeoJSON Polygon. Needs at least one array representing a loop. -Each loop consists of an array with at least three longitude/latitude pairs. The -first loop must be the outermost, while any subsequent loops will be interpreted -as holes. - -- **points** (array): array of (arrays of) longitude/latitude pairs -- returns **geoJson** (object|null): a valid GeoJSON Polygon - -Simple Polygon: - -@startDocuBlockInline aqlGeoPolygon_1 -@EXAMPLE_AQL{aqlGeoPolygon_1} -RETURN GEO_POLYGON([ - [0.0, 0.0], [7.5, 2.5], [0.0, 5.0] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoPolygon_1 - -Advanced Polygon with a hole inside: - -@startDocuBlockInline aqlGeoPolygon_2 -@EXAMPLE_AQL{aqlGeoPolygon_2} -RETURN GEO_POLYGON([ - [[35, 10], [45, 45], [15, 40], [10, 20], [35, 10]], - [[20, 30], [35, 35], [30, 20], [20, 30]] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoPolygon_2 - -### GEO_MULTIPOLYGON() - -`GEO_MULTIPOLYGON(polygons) → geoJson` - -Construct a GeoJSON MultiPolygon. Needs at least two Polygons inside. -See [GEO_POLYGON()](#geopolygon) for the rules of Polygon construction. - -- **polygons** (array): array of arrays of array of longitude/latitude pairs -- returns **geoJson** (object|null): a valid GeoJSON MultiPolygon - -MultiPolygon comprised of a simple Polygon and a Polygon with hole: - -@startDocuBlockInline aqlGeoMultiPolygon_1 -@EXAMPLE_AQL{aqlGeoMultiPolygon_1} -RETURN GEO_MULTIPOLYGON([ - [ - [[40, 40], [20, 45], [45, 30], [40, 40]] - ], - [ - [[20, 35], [10, 30], [10, 10], [30, 5], [45, 20], [20, 35]], - [[30, 20], [20, 15], [20, 25], [30, 20]] - ] -]) -@END_EXAMPLE_AQL -@endDocuBlock aqlGeoMultiPolygon_1 - -Geo Index Functions -------------------- - -{% hint 'warning' %} -The AQL functions `NEAR()`, `WITHIN()` and `WITHIN_RECTANGLE()` are -deprecated starting from version 3.4.0. -Please use the [Geo utility functions](#geo-utility-functions) instead. -{% endhint %} - -AQL offers the following functions to filter data based on -[geo indexes](../../Manual/Indexing/Geo.html). These functions require the collection -to have at least one geo index. If no geo index can be found, calling this -function will fail with an error at runtime. There is no error when explaining -the query however. - -### NEAR() - -{% hint 'warning' %} -`NEAR` is a deprecated AQL function from version 3.4.0 on. -Use [DISTANCE()](#distance) in a query like this instead: - -```js -FOR doc IN doc - SORT DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude) ASC - RETURN doc -``` -Assuming there exists a geo-type index on `latitude` and `longitude`, the -optimizer will recognize it and accelerate the query. -{% endhint %} - -`NEAR(coll, latitude, longitude, limit, distanceName) → docArray` - -Return at most *limit* documents from collection *coll* that are near -*latitude* and *longitude*. The result contains at most *limit* documents, -returned sorted by distance, with closest distances being returned first. -Optionally, the distances in meters between the specified coordinate -(*latitude* and *longitude*) and the document coordinates can be returned as -well. 
To make use of that, the desired attribute name for the distance result -has to be specified in the *distanceName* argument. The result documents will -contain the distance value in an attribute of that name. - -- **coll** (collection): a collection -- **latitude** (number): the latitude portion of the search coordinate -- **longitude** (number): the longitude portion of the search coordinate -- **limit** (number, *optional*): cap the result to at most this number of - documents. The default is 100. If more documents than *limit* are found, - it is undefined which ones will be returned. -- **distanceName** (string, *optional*): include the distance to the search - coordinate in each document in the result (in meters), using the attribute - name *distanceName* -- returns **docArray** (array): an array of documents, sorted by distance - (shortest distance first) - -### WITHIN() - -{% hint 'warning' %} -`WITHIN` is a deprecated AQL function from version 3.4.0 on. -Use [DISTANCE()](#distance) in a query like this instead: - -```js -FOR doc IN doc - LET d = DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude) - FILTER d <= radius - SORT d ASC - RETURN doc -``` - -Assuming there exists a geo-type index on `latitude` and `longitude`, the -optimizer will recognize it and accelerate the query. -{% endhint %} - -`WITHIN(coll, latitude, longitude, radius, distanceName) → docArray` - -Return all documents from collection *coll* that are within a radius of *radius* -around the specified coordinate (*latitude* and *longitude*). The documents -returned are sorted by distance to the search coordinate, with the closest -distances being returned first. Optionally, the distance in meters between the -search coordinate and the document coordinates can be returned as well. To make -use of that, an attribute name for the distance result has to be specified in -the *distanceName* argument. The result documents will contain the distance -value in an attribute of that name. - -- **coll** (collection): a collection -- **latitude** (number): the latitude portion of the search coordinate -- **longitude** (number): the longitude portion of the search coordinate -- **radius** (number): radius in meters -- **distanceName** (string, *optional*): include the distance to the search - coordinate in each document in the result (in meters), using the attribute - name *distanceName* -- returns **docArray** (array): an array of documents, sorted by distance - (shortest distance first) - -### WITHIN_RECTANGLE() - -{% hint 'warning' %} -`WITHIN_RECTANGLE` is a deprecated AQL function from version 3.4.0 on. Use -[GEO_CONTAINS](#geocontains) and a GeoJSON polygon instead: - -```js -LET rect = {type: "Polygon", coordinates: [[[longitude1, latitude1], ...]]]} -FOR doc IN doc - FILTER GEO_CONTAINS(poly, [doc.longitude, doc.latitude]) - RETURN doc -``` -Assuming there exists a geo-type index on `latitude` and `longitude`, the -optimizer will recognize it and accelerate the query. -{% endhint %} - -`WITHIN_RECTANGLE(coll, latitude1, longitude1, latitude2, longitude2) → docArray` - -Return all documents from collection *coll* that are positioned inside the -bounding rectangle with the points (*latitude1*, *longitude1*) and (*latitude2*, -*longitude2*). There is no guaranteed order in which the documents are returned. 
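A minimal usage sketch, assuming *coll* stands for a collection with a suitable geo index on its coordinate attributes (the parameters are described below); the coordinate values are placeholders:

```js
// sketch: iterate over the documents lying inside the bounding rectangle
FOR doc IN WITHIN_RECTANGLE(coll, 40.5, -74.3, 40.9, -73.7)
  RETURN doc
```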
- -- **coll** (collection): a collection -- **latitude1** (number): the bottom-left latitude portion of the search - coordinate -- **longitude1** (number): the bottom-left longitude portion of the search - coordinate -- **latitude2** (number): the top-right latitude portion of the search - coordinate -- **longitude2** (number): the top-right longitude portion of the search - coordinate -- returns **docArray** (array): an array of documents, in random order diff --git a/Documentation/Books/AQL/Functions/Miscellaneous.md b/Documentation/Books/AQL/Functions/Miscellaneous.md deleted file mode 100644 index 03f5ad9a50d6..000000000000 --- a/Documentation/Books/AQL/Functions/Miscellaneous.md +++ /dev/null @@ -1,410 +0,0 @@ -Miscellaneous functions -======================= - -Control flow functions ----------------------- - -### NOT_NULL() - -`NOT_NULL(alternative, ...) → value` - -Return the first element that is not *null*, and *null* if all alternatives -are *null* themselves. It is also known as `COALESCE()` in SQL. - -- **alternative** (any, *repeatable*): input of arbitrary type -- returns **value** (any): first non-null parameter, or *null* if all arguments - are *null* - -### FIRST_LIST() - -Return the first alternative that is an array, and *null* if none of the -alternatives is an array. - -- **alternative** (any, *repeatable*): input of arbitrary type -- returns **list** (list|null): array / list or null - -### FIRST_DOCUMENT() - -`FIRST_DOCUMENT(value) → doc` - -Return the first alternative that is a document, and *null* if none of the -alternatives is a document. - -- **alternative** (any, *repeatable*): input of arbitrary type -- returns **doc** (object|null): document / object or null - -### Ternary operator - -For conditional evaluation, check out the -[ternary operator](../Operators.md#ternary-operator). - -Database functions ------------------- - -### CHECK_DOCUMENT() - -Introduced in: v3.3.22, v3.4.2 - -`CHECK_DOCUMENT(document) → checkResult` - -Returns *true* if *document* is a valid document object, i.e. a document -without any duplicate attribute names. Will return *false* for any -non-objects/non-documents or documents with duplicate attribute names. - -{% hint 'warning' %} -This is an internal function for validating database objects and -is not supposed to be useful for anything else. -{% endhint %} - -The primary use case for this function is to apply it on all -documents in a given collection as follows: - -```js -FOR doc IN collection - FILTER !CHECK_DOCUMENT(doc) - RETURN JSON_STRINGIFY(doc) -``` - -This query will return all documents in the given collection with redundant -attribute names and export them. This output can be used for subsequent -cleanup operations. - -{% hint 'info' %} -When using object literals in AQL, there will be an automatic -removal/cleanup of duplicate attribute names, so the function will be effective -only for **already stored** database documents. Therefore, -`RETURN CHECK_DOCUMENT( { a: 1, a: 2 } )` is expected to return `true`. -{% endhint %} - -- **document** (object): an arbitrary document / object -- returns **checkResult** (bool): *true* for any valid objects/documents without - duplicate attribute names, and *false* for any non-objects/non-documents or - objects/documents with duplicate attribute names - -### COLLECTION_COUNT() - -`COLLECTION_COUNT(coll) → count` - -Determine the amount of documents in a collection. [LENGTH()](#length) -is preferred. 
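A minimal sketch of the preferred form, assuming *collectionName* stands for an existing collection:

```js
RETURN LENGTH(collectionName) // counts the documents; calls COLLECTION_COUNT() internally
```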
- -### COLLECTIONS() - -`COLLECTIONS() → docArray` - -Return an array of collections. - -- returns **docArray** (array): each collection as a document with attributes - *name* and *_id* in an array - -### COUNT() - -This is an alias for [LENGTH()](#length). - -### CURRENT_USER() - -`CURRENT_USER() → userName` - -Return the name of the current user. - -The current user is the user account name that was specified in the -*Authorization* HTTP header of the request. It will only be populated if -authentication on the server is turned on, and if the query was executed inside -a request context. Otherwise, the return value of this function will be *null*. - -- returns **userName** (string|null): the current user name, or *null* if - authentication is disabled - -### DECODE_REV() - -`DECODE_REV(revision) → details` - -Decompose the specified `revision` string into its components. -The resulting object has a `date` and a `count` attribute. -This function is supposed to be called with the `_rev` attribute value -of a database document as argument. - -- **revision** (string): revision ID string -- returns **details** (object|null): object with two attributes - *date* (string in ISO 8601 format) and *count* (integer number), - or *null* - -If the input revision ID is not a string or cannot be processed, the function -issues a warning and returns *null*. - -Please note that the result structure may change in future versions of -ArangoDB in case the internal format of revision strings is modified. Please -also note that the *date* value in the current result provides the date and -time of when the document record was put together on the server, but not -necessarily the time of insertion into the underlying storage engine. Therefore -in case of concurrent document operations the exact document storage order -cannot be derived unambiguously from the revision value. It should thus be -treated as a rough estimate of when a document was created or last updated. - -```js -DECODE_REV( "_YU0HOEG---" ) -// { "date" : "2019-03-11T16:15:05.314Z", "count" : 0 } -``` - -### DOCUMENT() - -`DOCUMENT(collection, id) → doc` - -Return the document which is uniquely identified by its *id*. ArangoDB will -try to find the document using the *_id* value of the document in the specified -collection. - -If there is a mismatch between the *collection* passed and the -collection specified in *id*, then *null* will be returned. Additionally, -if the *collection* matches the collection value specified in *id* but the -document cannot be found, *null* will be returned. - -This function also allows *id* to be an array of ids. In this case, the -function will return an array of all documents that could be found. - -It is also possible to specify a document key instead of an id, or an array -of keys to return all documents that can be found. 
- -- **collection** (string): name of a collection -- **id** (string|array): a document handle string (consisting of collection - name and document key), a document key, or an array of both document handle - strings and document keys -- returns **doc** (document|array|null): the content of the found document, - an array of all found documents or *null* if nothing was found - -```js -DOCUMENT( users, "users/john" ) -DOCUMENT( users, "john" ) - -DOCUMENT( users, [ "users/john", "users/amy" ] ) -DOCUMENT( users, [ "john", "amy" ] ) -``` - -`DOCUMENT(id) → doc` - -The function can also be used with a single parameter *id* as follows: - -- **id** (string|array): either a document handle string (consisting of - collection name and document key) or an array of document handle strings -- returns **doc** (document|null): the content of the found document - or *null* if nothing was found - -```js -DOCUMENT("users/john") -DOCUMENT( [ "users/john", "users/amy" ] ) -``` - -Please also consider to use -[`DOCUMENT` in conjunction with `WITH`](../Operations/With.md) - -### LENGTH() - -`LENGTH(coll) → documentCount` - -Determine the amount of documents in a collection. - -It calls [COLLECTION_COUNT()](#collectioncount) internally. - -- **coll** (collection): a collection (not string) -- returns **documentCount** (number): the total amount of documents in *coll* - -*LENGTH()* can also determine the [number of elements](Array.md#length) in an array, -the [number of attribute keys](Document.md#length) of an object / document and -the [character length](String.md#length) of a string. - -Hash functions --------------- - -### HASH() - -`HASH(value) → hashNumber` - -Calculate a hash value for *value*. - -- **value** (any): an element of arbitrary type -- returns **hashNumber** (number): a hash value of *value* - -*value* is not required to be a string, but can have any data type. The calculated -hash value will take the data type of *value* into account, so for example the -number *1* and the string *"1"* will have different hash values. For arrays the -hash values will be equal if the arrays contain exactly the same values -(including value types) in the same order. For objects the same hash values will -be created if the objects have exactly the same attribute names and values -(including value types). The order in which attributes appear inside objects -is not important for hashing. - -The hash value returned by this function is a number. The hash algorithm is not -guaranteed to remain the same in future versions of ArangoDB. The hash values -should therefore be used only for temporary calculations, e.g. to compare if two -documents are the same, or for grouping values in queries. - -### String-based hashing - -See the following string functions: - -- [CRC32()](String.md#crc32) -- [FNV64()](String.md#fnv64) -- [MD5()](String.md#md5) -- [SHA1()](String.md#sha1) -- [SHA512()](String.md#sha512) - -Function calling ----------------- - -### APPLY() - -`APPLY(functionName, arguments) → retVal` - -Dynamically call the function *funcName* with the arguments specified. -Arguments are given as array and are passed as separate parameters to -the called function. - -Both built-in and user-defined functions can be called. 
- -- **funcName** (string): a function name -- **arguments** (array, *optional*): an array with elements of arbitrary type -- returns **retVal** (any): the return value of the called function - -```js -APPLY( "SUBSTRING", [ "this is a test", 0, 7 ] ) -// "this is" -``` - -### ASSERT() / WARN() - -`ASSERT(expr, message) → retVal`
-`WARN(expr, message) → retVal` - -The two functions evaluate an expression. In case the expression evaluates to -*true* both functions will return *true*. If the expression evaluates to -*false* *ASSERT* will throw an error and *WARN* will issue a warning and return -*false*. This behavior allows the use of *ASSERT* and *WARN* in *FILTER* -conditions. - -- **expr** (expression): AQL expression to be evaluated -- **message** (string): message that will be used in exception or warning if expression evaluates to false -- returns **retVal** (bool): returns true if expression evaluates to true - -```js -FOR i IN 1..3 FILTER ASSERT(i > 0, "i is not greater 0") RETURN i -FOR i IN 1..3 FILTER WARN(i < 2, "i is not smaller 2") RETURN i -``` -### CALL() - -`CALL(funcName, arg1, arg2, ... argN) → retVal` - -Dynamically call the function *funcName* with the arguments specified. -Arguments are given as multiple parameters and passed as separate -parameters to the called function. - -Both built-in and user-defined functions can be called. - -- **funcName** (string): a function name -- **args** (any, *repeatable*): an arbitrary number of elements as - multiple arguments, can be omitted -- returns **retVal** (any): the return value of the called function - -```js -CALL( "SUBSTRING", "this is a test", 0, 4 ) -// "this" -``` - -Internal functions ------------------- - -The following functions are used during development of ArangoDB as a database -system, primarily for unit testing. They are not intended to be used by end -users, especially not in production environments. - -### FAIL() - -`FAIL(reason)` - -Let a query fail on purpose. Can be used in a conditional branch, or to verify -if lazy evaluation / short circuiting is used for instance. - -- **reason** (string): an error message -- returns nothing, because the query is aborted - -```js -RETURN 1 == 1 ? "okay" : FAIL("error") // "okay" -RETURN 1 == 1 || FAIL("error") ? true : false // true -RETURN 1 == 2 && FAIL("error") ? true : false // false -RETURN 1 == 1 && FAIL("error") ? true : false // aborted with error -``` - -### NOOPT() - -`NOOPT(value) → retVal` - -No-operation that prevents certain query compile-time and run-time optimizations. -Constant expressions can be forced to be evaluated at runtime with this. -This function is marked as non-deterministic so its argument withstands -query optimization. There is no need to call this function explicitly, it is -mainly used for internal testing. - -- **value** (any): a value of arbitrary type -- returns **retVal** (any): *value* - -```js -// differences in execution plan (explain) -FOR i IN 1..3 RETURN (1 + 1) // const assignment -FOR i IN 1..3 RETURN NOOPT(1 + 1) // simple expression - -NOOPT( 123 ) // evaluates 123 at runtime -NOOPT( CONCAT("a", "b") ) // evaluates concatenation at runtime -``` - -### PASSTHRU() - -`PASSTHRU(value) → retVal` - -Simply returns its call argument unmodified. There is no need to call this function -explicitly, it is mainly used for internal testing. - -- **value** (any): a value of arbitrary type -- returns **retVal** (any): *value* - -### SLEEP() - -`SLEEP(seconds) → null` - -Wait for a certain amount of time before continuing the query. - -- **seconds** (number): amount of time to wait -- returns a *null* value - -```js -SLEEP(1) // wait 1 second -SLEEP(0.02) // wait 20 milliseconds -``` - -### V8() - -`V8(expression) → retVal` - -No-operation that enforces the usage of the V8 JavaScript engine. 
There is -no need to call this function explicitly, it is mainly used for internal -testing. - -- **expression** (any): arbitrary expression -- returns **retVal** (any): the return value of the *expression* - -```js -// differences in execution plan (explain) -FOR i IN 1..3 RETURN (1 + 1) // const assignment -FOR i IN 1..3 RETURN V8(1 + 1) // simple expression -``` - -### VERSION() - -`VERSION() → serverVersion` - -Returns the server version as a string. In a cluster, returns the version -of the coordinator. - -- returns **serverVersion** (string): the server version string - -```js -RETURN VERSION() // e.g. "3.4.0" -``` diff --git a/Documentation/Books/AQL/Functions/Numeric.md b/Documentation/Books/AQL/Functions/Numeric.md deleted file mode 100644 index 371d3e94b34a..000000000000 --- a/Documentation/Books/AQL/Functions/Numeric.md +++ /dev/null @@ -1,629 +0,0 @@ -Numeric functions -================= - -AQL offers some numeric functions for calculations. The following functions are -supported: - -ABS() ------ - -`ABS(value) → unsignedValue` - -Return the absolute part of *value*. - -- **value** (number): any number, positive or negative -- returns **unsignedValue** (number): the number without + or - sign - -```js -ABS(-5) // 5 -ABS(+5) // 5 -ABS(3.5) // 3.5 -``` - -ACOS() ------- - -`ACOS(value) → num` - -Return the arccosine of *value*. - -- **value** (number): the input value -- returns **num** (number|null): the arccosine of *value*, or *null* if *value* is - outside the valid range -1 and 1 (inclusive) - -```js -ACOS(-1) // 3.141592653589793 -ACOS(0) // 1.5707963267948966 -ACOS(1) // 0 -ACOS(2) // null -``` - -ASIN() ------- - -`ASIN(value) → num` - -Return the arcsine of *value*. - -- **value** (number): the input value -- returns **num** (number|null): the arcsine of *value*, or *null* if *value* is - outside the valid range -1 and 1 (inclusive) - -```js -ASIN(1) // 1.5707963267948966 -ASIN(0) // 0 -ASIN(-1) // -1.5707963267948966 -ASIN(2) // null -``` - -ATAN() ------- - -`ATAN(value) → num` - -Return the arctangent of *value*. - -- **value** (number): the input value -- returns **num** (number): the arctangent of *value* - -```js -ATAN(-1) // -0.7853981633974483 -ATAN(0) // 0 -ATAN(10) // 1.4711276743037347 -``` - -ATAN2() -------- - -`ATAN2(y, x) → num` - -Return the arctangent of the quotient of *y* and *x*. - -```js -ATAN2(0, 0) // 0 -ATAN2(1, 0) // 1.5707963267948966 -ATAN2(1, 1) // 0.7853981633974483 -ATAN2(-10, 20) // -0.4636476090008061 -``` - -AVERAGE() ---------- - -`AVERAGE(numArray) → mean` - -Return the average (arithmetic mean) of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **mean** (number|null): the average value of *numArray*. If the array is - empty or contains *null* values only, *null* will be returned. - -```js -AVERAGE( [5, 2, 9, 2] ) // 4.5 -AVERAGE( [ -3, -5, 2 ] ) // -2 -AVERAGE( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 137.5 -``` - -## AVG() - -This is an alias for [AVERAGE()](#average). - -CEIL() ------- - -`CEIL(value) → roundedValue` - -Return the integer closest but not less than *value*. - -To round downward, see [FLOOR()](#floor).
-To round to the nearest integer value, see [ROUND()](#round). - -- **value** (number): any number -- returns **roundedValue** (number): the value rounded to the ceiling - -```js -CEIL(2.49) // 3 -CEIL(2.50) // 3 -CEIL(-2.50) // -2 -CEIL(-2.51) // -2 -``` - -COS() ------ - -`COS(value) → num` - -Return the cosine of *value*. - -- **value** (number): the input value -- returns **num** (number): the cosine of *value* - -```js -COS(1) // 0.5403023058681398 -COS(0) // 1 -COS(-3.141592653589783) // -1 -COS(RADIANS(45)) // 0.7071067811865476 -``` - -DEGREES() ---------- - -`DEGREES(rad) → num` - -Return the angle converted from radians to degrees. - -- **rad** (number): the input value -- returns **num** (number): the angle in degrees - -```js -DEGREES(0.7853981633974483) // 45 -DEGREES(0) // 0 -DEGREES(3.141592653589793) // 180 -``` - -EXP() ------ - -`EXP(value) → num` - -Return Euler's constant (2.71828...) raised to the power of *value*. - -- **value** (number): the input value -- returns **num** (number): Euler's constant raised to the power of *value* - -```js -EXP(1) // 2.718281828459045 -EXP(10) // 22026.46579480671 -EXP(0) // 1 -``` - -EXP2() ------- - -`EXP2(value) → num` - -Return 2 raised to the power of *value*. - -- **value** (number): the input value -- returns **num** (number): 2 raised to the power of *value* - -```js -EXP2(16) // 65536 -EXP2(1) // 2 -EXP2(0) // 1 -``` - -FLOOR() -------- - -`FLOOR(value) → roundedValue` - -Return the integer closest but not greater than *value*. - -To round upward, see [CEIL()](#ceil).
-To round to the nearest integer value, see [ROUND()](#round). - -- **value** (number): any number -- returns **roundedValue** (number): the value rounded downward - -```js -FLOOR(2.49) // 2 -FLOOR(2.50) // 2 -FLOOR(-2.50) // -3 -FLOOR(-2.51) // -3 -``` - -LOG() ------ - -`LOG(value) → num` - -Return the natural logarithm of *value*. The base is Euler's -constant (2.71828...). - -- **value** (number): the input value -- returns **num** (number|null): the natural logarithm of *value*, or *null* if *value* is - equal or less than 0 - -```js -LOG(2.718281828459045) // 1 -LOG(10) // 2.302585092994046 -LOG(0) // null -``` - -LOG2() ------- - -`LOG2(value) → num` - -Return the base 2 logarithm of *value*. - -- **value** (number): the input value -- returns **num** (number|null): the base 2 logarithm of *value*, or *null* if *value* is - equal or less than 0 - -```js -LOG2(1024) // 10 -LOG2(8) // 3 -LOG2(0) // null -``` - -LOG10() -------- - -`LOG10(value) → num` - -Return the base 10 logarithm of *value*. - -- **value** (number): the input value -- returns **num** (number): the base 10 logarithm of *value*, or *null* if *value* is - equal or less than 0 - -```js -LOG10(10000) // 10 -LOG10(10) // 1 -LOG10(0) // null -``` -MAX() ------ - -`MAX(anyArray) → max` - -Return the greatest element of *anyArray*. The array is not limited to numbers. -Also see [type and value order](../Fundamentals/TypeValueOrder.md). - -- **anyArray** (array): an array of numbers, *null* values are ignored -- returns **max** (any|null): the element with the greatest value. If the array is - empty or contains *null* values only, the function will return *null*. - -```js -MAX( [5, 9, -2, null, 1] ) // 9 -MAX( [ null, null ] ) // null -``` - -MEDIAN() --------- - -`MEDIAN(numArray) → median` - -Return the median value of the values in *array*. - -The array is sorted and the element in the middle is returned. If the array has an -even length of elements, the two center-most elements are interpolated by calculating -the average value (arithmetic mean). - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **median** (number|null): the median of *numArray*. If the array is - empty or contains *null* values only, the function will return *null*. - -```js -MEDIAN( [ 1, 2, 3] ) // 2 -MEDIAN( [ 1, 2, 3, 4 ] ) // 2.5 -MEDIAN( [ 4, 2, 3, 1 ] ) // 2.5 -MEDIAN( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 4 -``` - -MIN() ------ - -`MIN(anyArray) → min` - -Return the smallest element of *anyArray*. The array is not limited to numbers. -Also see [type and value order](../Fundamentals/TypeValueOrder.md). - -- **anyArray** (array): an array of numbers, *null* values are ignored -- returns **min** (any|null): the element with the smallest value. If the array is - empty or contains *null* values only, the function will return *null*. - -```js -MIN( [5, 9, -2, null, 1] ) // -2 -MIN( [ null, null ] ) // null -``` - -PERCENTILE() ------------- - -`PERCENTILE(numArray, n, method) → percentile` - -Return the *n*th percentile of the values in *numArray*. 
- -- **numArray** (array): an array of numbers, *null* values are ignored -- **n** (number): must be between 0 (excluded) and 100 (included) -- **method** (string, *optional*): "rank" (default) or "interpolation" -- returns **percentile** (number|null): the *n*th percentile, or *null* if the - array is empty or only *null* values are contained in it or the percentile - cannot be calculated - -```js -PERCENTILE( [1, 2, 3, 4], 50 ) // 2 -PERCENTILE( [1, 2, 3, 4], 50, "rank" ) // 2 -PERCENTILE( [1, 2, 3, 4], 50, "interpolation" ) // 2.5 -``` - -PI() ----- - -`PI() → pi` - -Return pi. - -- returns **pi** (number): the first few significant digits of pi (3.141592653589793) - -```js -PI() // 3.141592653589793 -``` - -POW() ------ - -`POW(base, exp) → num` - -Return the *base* to the exponent *exp*. - -- **base** (number): the base value -- **exp** (number): the exponent value -- returns **num** (number): the exponentiated value - -```js -POW( 2, 4 ) // 16 -POW( 5, -1 ) // 0.2 -POW( 5, 0 ) // 1 -``` - -RADIANS() ---------- - -`RADIANS(deg) → num` - -Return the angle converted from degrees to radians. - -- **deg** (number): the input value -- returns **num** (number): the angle in radians - -```js -RADIANS(180) // 3.141592653589793 -RADIANS(90) // 1.5707963267948966 -RADIANS(0) // 0 -``` - -RAND() ------- - -`RAND() → randomNumber` - -Return a pseudo-random number between 0 and 1. - -- returns **randomNumber** (number): a number greater than 0 and less than 1 - -```js -RAND() // 0.3503170117504508 -RAND() // 0.6138226173882478 -``` - -Complex example: - -```js -LET coinFlips = ( - FOR i IN 1..100000 - RETURN RAND() > 0.5 ? "heads" : "tails" -) -RETURN MERGE( - FOR flip IN coinFlips - COLLECT f = flip WITH COUNT INTO count - RETURN { [f]: count } -) -``` - -Result: - -```json -[ - { - "heads": 49902, - "tails": 50098 - } -] -``` - -RANGE() -------- - -`RANGE(start, stop, step) → numArray` - -Return an array of numbers in the specified range, optionally with increments -other than 1. The *start* and *stop* arguments are truncated to integers -unless a *step* argument is provided. - -Also see the [range operator](../Operators.md#range-operator) for ranges -with integer bounds and a step size of 1. - -- **start** (number): the value to start the range at (inclusive) -- **stop** (number): the value to end the range with (inclusive) -- **step** (number, *optional*): how much to increment in every step, - the default is *1.0* -- returns **numArray** (array): all numbers in the range as array - -```js -RANGE(1, 4) // [ 1, 2, 3, 4 ] -RANGE(1, 4, 2) // [ 1, 3 ] -RANGE(1, 4, 3) // [ 1, 4 ] -RANGE(1.5, 2.5) // [ 1, 2 ] -RANGE(1.5, 2.5, 1) // [ 1.5, 2.5 ] -RANGE(1.5, 2.5, 0.5) // [ 1.5, 2, 2.5 ] -RANGE(-0.75, 1.1, 0.5) // [ -0.75, -0.25, 0.25, 0.75 ] -``` - -ROUND() -------- - -`ROUND(value) → roundedValue` - -Return the integer closest to *value*. - -- **value** (number): any number -- returns **roundedValue** (number): the value rounded to the closest integer - -```js -ROUND(2.49) // 2 -ROUND(2.50) // 3 -ROUND(-2.50) // -2 -ROUND(-2.51) // -3 -``` - -Rounding towards zero, also known as *trunc()* in C/C++, can be achieved with -a combination of the [ternary operator](../Operators.md#ternary-operator), -[CEIL()](#ceil) -and [FLOOR()](#floor): - -```js -value >= 0 ? FLOOR(value) : CEIL(value) -``` - -SIN() ------ - -`SIN(value) → num` - -Return the sine of *value*. 
- -- **value** (number): the input value -- returns **num** (number): the sine of *value* - -```js -SIN(3.141592653589783 / 2) // 1 -SIN(0) // 0 -SIN(-3.141592653589783 / 2) // -1 -SIN(RADIANS(270)) // -1 -``` - -SQRT() ------- - -`SQRT(value) → squareRoot` - -Return the square root of *value*. - -- **value** (number): a number -- returns **squareRoot** (number): the square root of *value* - -```js -SQRT(9) // 3 -SQRT(2) // 1.4142135623730951 -``` - -Other roots can be calculated with [POW()](#pow) like `POW(value, 1/n)`: - -```js -// 4th root of 8*8*8*8 = 4096 -POW(4096, 1/4) // 8 - -// cube root of 3*3*3 = 27 -POW(27, 1/3) // 3 - -// square root of 3*3 = 9 -POW(9, 1/2) // 3 -``` - -STDDEV_POPULATION() -------------------- - -`STDDEV_POPULATION(numArray) → num` - -Return the population standard deviation of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **num** (number|null): the population standard deviation of *numArray*. - If the array is empty or only *null* values are contained in the array, - *null* will be returned. - -```js -STDDEV_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 1.854723699099141 -``` - -STDDEV_SAMPLE() ---------------- - -`STDDEV_SAMPLE(numArray) → num` - -Return the sample standard deviation of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **num** (number|null): the sample standard deviation of *numArray*. - If the array is empty or only *null* values are contained in the array, - *null* will be returned. - -```js -STDDEV_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 2.0736441353327724 -``` - -## STDDEV() - -This is an alias for [STDDEV_POPULATION()](#stddevpopulation). - -SUM() ------ - -`SUM(numArray) → sum` - -Return the sum of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **sum** (number): the total of all values in *numArray*. If the array - is empty or only *null* values are contained in the array, *0* will be returned. - -```js -SUM( [1, 2, 3, 4] ) // 10 -SUM( [null, -5, 6] ) // 1 -SUM( [ ] ) // 0 -``` - -TAN() ------ - -`TAN(value) → num` - -Return the tangent of *value*. - -- **value** (number): the input value -- returns **num** (number): the tangent of *value* - -```js -TAN(10) // 0.6483608274590866 -TAN(5) // -3.380515006246586 -TAN(0) // 0 -``` - -VARIANCE_POPULATION() ---------------------- - -`VARIANCE_POPULATION(numArray) → num` - -Return the population variance of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **num** (number|null): the population variance of *numArray*. - If the array is empty or only *null* values are contained in the array, - *null* will be returned. - -```js -VARIANCE_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 3.4400000000000004 -``` - -VARIANCE_SAMPLE() ------------------ - -`VARIANCE_SAMPLE(array) → num` - -Return the sample variance of the values in *array*. - -- **numArray** (array): an array of numbers, *null* values are ignored -- returns **num** (number|null): the sample variance of *numArray*. - If the array is empty or only *null* values are contained in the array, - *null* will be returned. - -```js -VARIANCE_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 4.300000000000001 -``` - -## VARIANCE() - -This is an alias for [VARIANCE_POPULATION()](#variancepopulation). 
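As it is only an alias, it should produce the same result as the *VARIANCE_POPULATION()* example above:

```js
VARIANCE( [ 1, 3, 6, 5, 2 ] ) // same as VARIANCE_POPULATION(): 3.4400000000000004
```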
diff --git a/Documentation/Books/AQL/Functions/README.md b/Documentation/Books/AQL/Functions/README.md deleted file mode 100644 index af2c6fce6f50..000000000000 --- a/Documentation/Books/AQL/Functions/README.md +++ /dev/null @@ -1,34 +0,0 @@ -Functions -========= - -AQL supports functions to allow more complex computations. Functions can be -called at any query position where an expression is allowed. The general -function call syntax is: - -```js -FUNCTIONNAME(arguments) -``` - -where *FUNCTIONNAME* is the name of the function to be called, and *arguments* -is a comma-separated list of function arguments. If a function does not need any -arguments, the argument list can be left empty. However, even if the argument -list is empty the parentheses around it are still mandatory to make function -calls distinguishable from variable names. - -Some example function calls: - -```js -HAS(user, "name") -LENGTH(friends) -COLLECTIONS() -``` - -In contrast to collection and variable names, function names are case-insensitive, -i.e. *LENGTH(foo)* and *length(foo)* are equivalent. - -Extending AQL -------------- - -It is possible to extend AQL with user-defined functions. These functions need to -be written in JavaScript, and have to be registered before they can be used in a query. -Please refer to [Extending AQL](../Extending/index.html) for more details. diff --git a/Documentation/Books/AQL/Functions/String.md b/Documentation/Books/AQL/Functions/String.md deleted file mode 100644 index 24f9fec11f36..000000000000 --- a/Documentation/Books/AQL/Functions/String.md +++ /dev/null @@ -1,942 +0,0 @@ -String functions -================ - -For string processing, AQL offers the following functions: - -CHAR_LENGTH() -------------- - -`CHAR_LENGTH(value) → length` - -Return the number of characters in *value* (not byte length). - -|input|length| -|---|---| -|String|number of unicode characters| -|Number|number of unicode characters that represent the number| -|Array / Object|number of unicode characters from the resulting stringification| -|true| 4 | -|false| 5 | -|null| 0 | - -CONCAT() --------- - -`CONCAT(value1, value2, ... valueN) → str` - -Concatenate the values passed as *value1* to *valueN*. - -- **values** (any, *repeatable*): elements of arbitrary type (at least 1) -- returns **str** (string): a concatenation of the elements. *null* values - are ignored. - -```js -CONCAT("foo", "bar", "baz") // "foobarbaz" -CONCAT(1, 2, 3) // "123" -CONCAT("foo", [5, 6], {bar: "baz"}) // "foo[5,6]{\"bar\":\"baz\"}" -``` - -`CONCAT(anyArray) → str` - -If a single array is passed to *CONCAT()*, its members are concatenated. - -- **anyArray** (array): array with elements of arbitrary type -- returns **str** (string): a concatenation of the array elements. *null* values - are ignored. - -```js -CONCAT( [ "foo", "bar", "baz" ] ) // "foobarbaz" -CONCAT( [1, 2, 3] ) // "123" -``` - -CONCAT_SEPARATOR() ------------------- - -`CONCAT_SEPARATOR(separator, value1, value2, ... valueN) → joinedString` - -Concatenate the strings passed as arguments *value1* to *valueN* using the -*separator* string. - -- **separator** (string): an arbitrary separator string -- **values** (string|array, *repeatable*): strings or arrays of strings as multiple - arguments (at least 1) -- returns **joinedString** (string): a concatenated string of the elements, using - *separator* as separator string. *null* values are ignored. Array value arguments - are expanded automatically, and their individual members will be concatenated. 
- Nested arrays will be expanded too, but with their elements separated by commas - if they have more than a single element. - -```js -CONCAT_SEPARATOR(", ", "foo", "bar", "baz") -// "foo, bar, baz" - -CONCAT_SEPARATOR(", ", [ "foo", "bar", "baz" ]) -// "foo, bar, baz" - -CONCAT_SEPARATOR(", ", [ "foo", [ "b", "a", "r" ], "baz" ]) -// [ "foo, b,a,r, baz" ] - -CONCAT_SEPARATOR("-", [1, 2, 3, null], [4, null, 5]) -// "1-2-3-4-5" -``` - -CONTAINS() ----------- - -`CONTAINS(text, search, returnIndex) → match` - -Check whether the string *search* is contained in the string *text*. -The string matching performed by *CONTAINS* is case-sensitive. - -- **text** (string): the haystack -- **search** (string): the needle -- **returnIndex** (bool, *optional*): if set to *true*, the character position - of the match is returned instead of a boolean. The default is *false*. - The default is *false*. -- returns **match** (bool|number): by default, *true* is returned if *search* - is contained in *text*, and *false* otherwise. With *returnIndex* set to *true*, - the position of the first occurrence of *search* within *text* is returned - (starting at offset 0), or *-1* if *search* is not contained in *text*. - -```js -CONTAINS("foobarbaz", "bar") // true -CONTAINS("foobarbaz", "horse") // false -CONTAINS("foobarbaz", "ba", true) // 3 -CONTAINS("foobarbaz", "horse", true) // -1 -``` - -To determine if or at which position a value is included in an array, see the -[POSITION() array function](Array.md#position). - -COUNT() -------- - -This is an alias for [LENGTH()](#length). - -CRC32() ------ - -`CRC32(text) → hash` - -Calculate the CRC32 checksum for *text* and return it in a hexadecimal -string representation. The polynomial used is 0x1EDC6F41. The initial -value used is 0xFFFFFFFF, and the final xor value is also 0xFFFFFFFF. - -- **text** (string): a string -- returns **hash** (string): CRC32 checksum as hex string - -```js -CRC32("foobar") // "D5F5C7F" -``` - -ENCODE_URI_COMPONENT() ------------ - -`ENCODE_URI_COMPONENT(value) → encodedURIComponentString` - -Return the encoded uri component of *value*. - -- **value** (string): a string -- returns **encodedURIComponentString** (string): an encoded uri component of *value* - -FIND_FIRST() ------------- - -`FIND_FIRST(text, search, start, end) → position` - -Return the position of the first occurrence of the string *search* inside the -string *text*. Positions start at 0. - -- **text** (string): the haystack -- **search** (string): the needle -- **start** (number, *optional*): limit the search to a subset of the text, - beginning at *start* -- **end** (number, *optional*): limit the search to a subset of the text, - ending at *end* -- returns **position** (number): the character position of the match. If *search* - is not contained in *text*, -1 is returned. If **search** is empty, **start** is returned. - -```js -FIND_FIRST("foobarbaz", "ba") // 3 -FIND_FIRST("foobarbaz", "ba", 4) // 6 -FIND_FIRST("foobarbaz", "ba", 0, 3) // -1 -``` - -FIND_LAST() ------------ - -`FIND_LAST(text, search, start, end) → position` - -Return the position of the last occurrence of the string *search* inside the -string *text*. Positions start at 0. - -- **text** (string): the haystack -- **search** (string): the needle -- **start** (number, *optional*): limit the search to a subset of the text, - beginning at *start* -- **end** (number, *optional*): limit the search to a subset of the text, - ending at *end* -- returns **position** (number): the character position of the match. 
If *search* - is not contained in *text*, -1 is returned. - If *search* is empty, the string length is returned, or *end* + 1. - -```js -FIND_LAST("foobarbaz", "ba") // 6 -FIND_LAST("foobarbaz", "ba", 7) // -1 -FIND_LAST("foobarbaz", "ba", 0, 4) // 3 -``` - -FNV64() ------ - -`FNV64(text) → hash` - -Calculate the FNV-1A 64 bit hash for *text* and return it in a hexadecimal -string representation. - -- **text** (string): a string -- returns **hash** (string): FNV-1A hash as hex string - -```js -FNV64("foobar") // "85944171F73967E8" -``` - -JSON_PARSE() ------------- - -`JSON_PARSE(text) → value` - -Return an AQL value described by the JSON-encoded input string. - -- **text** (string): the string to parse as JSON -- returns **value** (mixed): the value corresponding to the given JSON text. - For input values that are no valid JSON strings, the function will return *null*. - -```js -JSON_PARSE("123") // 123 -JSON_PARSE("[ true, false, 2 ]") // [ true, false, 2 ] -JSON_PARSE("\\\"abc\\\"") // "abc" -JSON_PARSE("{\\\"a\\\": 1}") // { a : 1 } -JSON_PARSE("abc") // null -``` - -JSON_STRINGIFY() ----------------- - -`JSON_STRINGIFY(value) → text` - -Return a JSON string representation of the input value. - -- **value** (mixed): the value to convert to a JSON string -- returns **text** (string): the JSON string representing *value*. - For input values that cannot be converted to JSON, the function - will return *null*. - -```js -JSON_STRINGIFY("1") // "1" -JSON_STRINGIFY("abc") // "\"abc\"" -JSON_STRINGIFY("[1, 2, 3]") // "[1,2,3]" -``` - -LEFT() ------- - -`LEFT(value, n) → substring` - -Return the *n* leftmost characters of the string *value*. - -To return the rightmost characters, see [RIGHT()](#right).
-To take a part from an arbitrary position off the string, -see [SUBSTRING()](#substring). - -- **value** (string): a string -- **n** (number): how many characters to return -- returns **substring** (string): at most *n* characters of *value*, - starting on the left-hand side of the string - -```js -LEFT("foobar", 3) // "foo" -LEFT("foobar", 10) // "foobar" -``` - -LENGTH() --------- - -`LENGTH(str) → length` - -Determine the character length of a string. - -- **str** (string): a string. If a number is passed, it will be casted to string first. -- returns **length** (number): the character length of *str* (not byte length) - -```js -LENGTH("foobar") // 6 -LENGTH("电脑坏了") // 4 -``` - -*LENGTH()* can also determine the [number of elements](Array.md#length) in an array, -the [number of attribute keys](Document.md#length) of an object / document and -the [amount of documents](Miscellaneous.md#length) in a collection. - -LEVENSHTEIN_DISTANCE() ----------------------- - -`LEVENSHTEIN_DISTANCE(value1, value2) → levenshteinDistance` - -Calculate the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance) -between two strings. - -- **value1** (string): a string -- **value2** (string): a string -- returns **levenshteinDistance** (number): calculated Levenshtein distance - between the input strings *value1* and *value2* - -```js -LEVENSHTEIN_DISTANCE("foobar", "bar") // 3 -LEVENSHTEIN_DISTANCE(" ", "") // 1 -LEVENSHTEIN_DISTANCE("The quick brown fox jumps over the lazy dog", "The quick black dog jumps over the brown fox") // 13 -LEVENSHTEIN_DISTANCE("der mötör trötet", "der trötet") // 6 -``` - -LIKE() ------- - -`LIKE(text, search, caseInsensitive) → bool` - -Check whether the pattern *search* is contained in the string *text*, -using wildcard matching. - -- **text** (string): the string to search in -- **search** (string): a search pattern that can contain the wildcard characters - `%` (meaning any sequence of characters, including none) and `_` (any single - character). Literal *%* and *:* must be escaped with two backslashes (four - in arangosh). - *search* cannot be a variable or a document attribute. The actual value must - be present at query parse time already. -- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be - case-insensitive. The default is *false*. -- returns **bool** (bool): *true* if the pattern is contained in *text*, - and *false* otherwise - -```js -LIKE("cart", "ca_t") // true -LIKE("carrot", "ca_t") // false -LIKE("carrot", "ca%t") // true - -LIKE("foo bar baz", "bar") // false -LIKE("foo bar baz", "%bar%") // true -LIKE("bar", "%bar%") // true - -LIKE("FoO bAr BaZ", "fOo%bAz") // false -LIKE("FoO bAr BaZ", "fOo%bAz", true) // true -``` - -LOWER() -------- - -`LOWER(value) → lowerCaseString` - -Convert upper-case letters in *value* to their lower-case counterparts. -All other characters are returned unchanged. - -- **value** (string): a string -- returns **lowerCaseString** (string): *value* with upper-case characters converted - to lower-case characters - -LTRIM() -------- - -`LTRIM(value, chars) → strippedString` - -Return the string *value* with whitespace stripped from the start only. - -To strip from the end only, see [RTRIM()](#rtrim).
-To strip both sides, see [TRIM()](#trim). - -- **value** (string): a string -- **chars** (string, *optional*): override the characters that should - be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`, - `0x20` and `0x09`). -- returns **strippedString** (string): *value* without *chars* at the - left-hand side - -```js -LTRIM("foo bar") // "foo bar" -LTRIM(" foo bar ") // "foo bar " -LTRIM("--==[foo-bar]==--", "-=[]") // "foo-bar]==--" -``` - -MD5() ------ - -`MD5(text) → hash` - -Calculate the MD5 checksum for *text* and return it in a hexadecimal -string representation. - -- **text** (string): a string -- returns **hash** (string): MD5 checksum as hex string - -```js -MD5("foobar") // "3858f62230ac3c915f300c664312c63f" -``` - -RANDOM_TOKEN() --------------- - -`RANDOM_TOKEN(length) → randomString` - -Generate a pseudo-random token string with the specified length. -The algorithm for token generation should be treated as opaque. - -- **length** (number): desired string length for the token. It must be greater - than 0 and at most 65536. -- returns **randomString** (string): a generated token consisting of lowercase - letters, uppercase letters and numbers - -```js -RANDOM_TOKEN(8) // "zGl09z42" -RANDOM_TOKEN(8) // "m9w50Ft9" -``` - -REGEX_MATCHES() ---------------- - -`REGEX_MATCHES(text, regex, caseInsensitive) → stringArray` - -Return the matches in the given string *text*, using the *regex*. - -- **text** (string): the string to search in -- **regex** (string): a regular expression to use for matching the *text* -- returns **stringArray** (array): an array of strings containing the matches - -The regular expression may consist of literal characters and the following -characters and sequences: - -- `.` – the dot matches any single character except line terminators. - To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag. -- `\d` – matches a single digit, equivalent to `[0-9]` -- `\s` – matches a single whitespace character -- `\S` – matches a single non-whitespace character -- `\t` – matches a tab character -- `\r` – matches a carriage return -- `\n` – matches a line-feed character -- `[xyz]` – set of characters. Matches any of the enclosed characters - (here: *x*, *y* or *z*) -- `[^xyz]` – negated set of characters. Matches any other character than the - enclosed ones (i.e. anything but *x*, *y* or *z* in this case) -- `[x-z]` – range of characters. Matches any of the characters in the - specified range, e.g. `[0-9A-F]` to match any character in - *0123456789ABCDEF* -- `[^x-z]` – negated range of characters. Matches any other character than the - ones specified in the range -- `(xyz)` – defines and matches a pattern group -- `(x|y)` – matches either *x* or *y* -- `^` – matches the beginning of the string (e.g. `^xyz`) -- $ – matches the end of the string (e.g. xyz$) - -Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`, -and `$` have a special meaning in regular expressions and may need to be -escaped using a backslash, which requires escaping itself (`\\`). A literal -backslash needs to be escaped using another escaped backslash, i.e. `\\\\`. -In arangosh, the amount of backslashes needs to be doubled. 
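For example, capturing the parts of a date string with escaped `\d` sequences (backslashes doubled for the AQL string literal; quantifiers such as `{4}` are described below) is expected to yield the full match followed by the pattern groups, analogous to the examples further down:

```js
REGEX_MATCHES("2019-03-11", "^(\\d{4})-(\\d{2})-(\\d{2})$")
// expected: ["2019-03-11", "2019", "03", "11"]
```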
- -Characters and sequences may optionally be repeated using the following -quantifiers: - -- `x*` – matches zero or more occurrences of *x* -- `x+` – matches one or more occurrences of *x* -- `x?` – matches one or zero occurrences of *x* -- `x{y}` – matches exactly *y* occurrences of *x* -- `x{y,z}` – matches between *y* and *z* occurrences of *x* -- `x{y,}` – matches at least *y* occurrences of *x* - -Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead, -you need to define a pattern group by wrapping the sub-expression in parentheses -and place the quantifier right behind it: `(xyz)+`. - -If the regular expression in *regex* is invalid, a warning will be raised -and the function will return *null*. - -```js -REGEX_MATCHES("My-us3r_n4m3", "^[a-z0-9_-]{3,16}$", true) // ["My-us3r_n4m3"] -REGEX_MATCHES("#4d82h4", "^#?([a-f0-9]{6}|[a-f0-9]{3})$", true) // null -REGEX_MATCHES("john@doe.com", "^([a-z0-9_\.-]+)@([\da-z-]+)\.([a-z\.]{2,6})$", false) // ["john@doe.com", "john", "doe", "com"] -``` - -REGEX_SPLIT() -------------- - -`REGEX_SPLIT(text, splitExpression, caseInsensitive, limit) → stringArray` - -Split the given string *text* into a list of strings, using the *separator*. - -- **text** (string): the string to split -- **splitExpression** (string): a regular expression to use for splitting the *text* -- **limit** (number, *optional*): limit the number of split values in the result. - If no *limit* is given, the number of splits returned is not bounded. -- returns **stringArray** (array): an array of strings - -The regular expression may consist of literal characters and the following -characters and sequences: - -- `.` – the dot matches any single character except line terminators. - To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag. -- `\d` – matches a single digit, equivalent to `[0-9]` -- `\s` – matches a single whitespace character -- `\S` – matches a single non-whitespace character -- `\t` – matches a tab character -- `\r` – matches a carriage return -- `\n` – matches a line-feed character -- `[xyz]` – set of characters. Matches any of the enclosed characters - (here: *x*, *y* or *z*) -- `[^xyz]` – negated set of characters. Matches any other character than the -enclosed ones (i.e. anything but *x*, *y* or *z* in this case) -- `[x-z]` – range of characters. Matches any of the characters in the - specified range, e.g. `[0-9A-F]` to match any character in - *0123456789ABCDEF* -- `[^x-z]` – negated range of characters. Matches any other character than the -ones specified in the range -- `(xyz)` – defines and matches a pattern group -- `(x|y)` – matches either *x* or *y* -- `^` – matches the beginning of the string (e.g. `^xyz`) -- $ – matches the end of the string (e.g. xyz$) - -Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`, -and `$` have a special meaning in regular expressions and may need to be -escaped using a backslash, which requires escaping itself (`\\`). A literal -backslash needs to be escaped using another escaped backslash, i.e. `\\\\`. -In arangosh, the amount of backslashes needs to be doubled. 
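For example, to split on a literal dot the metacharacter has to be escaped (with the backslash doubled inside the AQL string literal); an unescaped `.` would match any single character instead. A small sketch with a placeholder input string:

```js
REGEX_SPLIT("192.168.0.1", "\\.") // expected: ["192", "168", "0", "1"]
```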
- -Characters and sequences may optionally be repeated using the following -quantifiers: - -- `x*` – matches zero or more occurrences of *x* -- `x+` – matches one or more occurrences of *x* -- `x?` – matches one or zero occurrences of *x* -- `x{y}` – matches exactly *y* occurrences of *x* -- `x{y,z}` – matches between *y* and *z* occurrences of *x* -- `x{y,}` – matches at least *y* occurrences of *x* - -Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead, -you need to define a pattern group by wrapping the sub-expression in parentheses -and place the quantifier right behind it: `(xyz)+`. - -If the regular expression in *splitExpression* is invalid, a warning will be raised -and the function will return *null*. - -```js -REGEX_SPLIT("This is a line.\n This is yet another line\r\n This again is a line.\r Mac line ", "\.?(\n|\r|\r\n)", true, 4) // ["This is a line", "\n", " This is yet another lin", "\r"] -REGEX_SPLIT("hypertext language, programming", "[\s, ]+") // ["hypertext", "language", "programming"] -REGEX_SPLIT("ca,bc,a,bca,bca,bc", "a,b", true, 5) // ["c", "c,", "c", "c", "c"] -``` - -REGEX_TEST() ------------- - -`REGEX_TEST(text, search, caseInsensitive) → bool` - -Check whether the pattern *search* is contained in the string *text*, -using regular expression matching. - -- **text** (string): the string to search in -- **search** (string): a regular expression search pattern -- returns **bool** (bool): *true* if the pattern is contained in *text*, - and *false* otherwise -- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be - case-insensitive. The default is *false*. - -The regular expression may consist of literal characters and the following -characters and sequences: - -- `.` – the dot matches any single character except line terminators. - To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag. -- `\d` – matches a single digit, equivalent to `[0-9]` -- `\s` – matches a single whitespace character -- `\S` – matches a single non-whitespace character -- `\t` – matches a tab character -- `\r` – matches a carriage return -- `\n` – matches a line-feed character -- `[xyz]` – set of characters. Matches any of the enclosed characters - (here: *x*, *y* or *z*) -- `[^xyz]` – negated set of characters. Matches any other character than the - enclosed ones (i.e. anything but *x*, *y* or *z* in this case) -- `[x-z]` – range of characters. Matches any of the characters in the - specified range, e.g. `[0-9A-F]` to match any character in - *0123456789ABCDEF* -- `[^x-z]` – negated range of characters. Matches any other character than the - ones specified in the range -- `(xyz)` – defines and matches a pattern group -- `(x|y)` – matches either *x* or *y* -- `^` – matches the beginning of the string (e.g. `^xyz`) -- $ – matches the end of the string (e.g. xyz$) - -Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`, -and `$` have a special meaning in regular expressions and may need to be -escaped using a backslash, which requires escaping itself (`\\`). A literal -backslash needs to be escaped using another escaped backslash, i.e. `\\\\`. -In arangosh, the amount of backslashes needs to be doubled. 
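For example, an unescaped `.` matches any single character, while the escaped form only matches a literal dot (input strings are placeholders, results are the expected outcomes):

```js
REGEX_TEST("version 3.4", "3.4")   // true
REGEX_TEST("version 3x4", "3.4")   // expected: true, the dot also matches the "x"
REGEX_TEST("version 3x4", "3\\.4") // expected: false, only a literal dot matches
```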
- -Characters and sequences may optionally be repeated using the following -quantifiers: - -- `x*` – matches zero or more occurrences of *x* -- `x+` – matches one or more occurrences of *x* -- `x?` – matches one or zero occurrences of *x* -- `x{y}` – matches exactly *y* occurrences of *x* -- `x{y,z}` – matches between *y* and *z* occurrences of *x* -- `x{y,}` – matches at least *y* occurrences of *x* - -Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead, -you need to define a pattern group by wrapping the sub-expression in parentheses -and place the quantifier right behind it: `(xyz)+`. - -If the regular expression in *search* is invalid, a warning will be raised -and the function will return *null*. - -```js -REGEX_TEST("the quick brown fox", "the.*fox") // true -REGEX_TEST("the quick brown fox", "^(a|the)\s+(quick|slow).*f.x$") // true -REGEX_TEST("the\nquick\nbrown\nfox", "^the(\n[a-w]+)+\nfox$") // true -``` - -REGEX_REPLACE() ---------------- - -`REGEX_REPLACE(text, search, replacement, caseInsensitive) → string` - -Replace the pattern *search* with the string *replacement* in the string -*text*, using regular expression matching. - -- **text** (string): the string to search in -- **search** (string): a regular expression search pattern -- **replacement** (string): the string to replace the *search* pattern with -- returns **string** (string): the string *text* with the *search* regex - pattern replaced with the *replacement* string wherever the pattern exists - in *text* -- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be - case-insensitive. The default is *false*. - -For more details about the rules for characters and sequences refer -[REGEX_TEST()](#regextest). - -If the regular expression in *search* is invalid, a warning will be raised -and the function will return *null*. - -```js -REGEX_REPLACE("the quick brown fox", "the.*fox", "jumped over") // jumped over -REGEX_REPLACE("the quick brown fox", "o", "i") // the quick briwn fix -``` - -REVERSE() ---------- - -`REVERSE(value) → reversedString` - -Return the reverse of the string *value*. - -- **value** (string): a string -- returns **reversedString** (string): a new string with the characters in - reverse order - -```js -REVERSE("foobar") // "raboof" -REVERSE("电脑坏了") // "了坏脑电" -``` - -RIGHT() -------- - -`RIGHT(value, length) → substring` - -Return the *length* rightmost characters of the string *value*. - -To return the leftmost characters, see [LEFT()](#left).
-To take a part from an arbitrary position off the string, -see [SUBSTRING()](#substring). - -- **value** (string): a string -- **length** (number): how many characters to return -- returns **substring** (string): at most *length* characters of *value*, - starting on the right-hand side of the string - -```js -RIGHT("foobar", 3) // "bar" -RIGHT("foobar", 10) // "foobar" -``` - -RTRIM() -------- - -`RTRIM(value, chars) → strippedString` - -Return the string *value* with whitespace stripped from the end only. - -To strip from the start only, see [LTRIM()](#ltrim).
-To strip both sides, see [TRIM()](#trim). - -- **value** (string): a string -- **chars** (string, *optional*): override the characters that should - be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`, - `0x20` and `0x09`). -- returns **strippedString** (string): *value* without *chars* at the - right-hand side - -```js -RTRIM("foo bar") // "foo bar" -RTRIM(" foo bar ") // " foo bar" -RTRIM("--==[foo-bar]==--", "-=[]") // "--==[foo-bar" -``` - -SHA1() ------- - -`SHA1(text) → hash` - -Calculate the SHA1 checksum for *text* and returns it in a hexadecimal -string representation. - -- **text** (string): a string -- returns **hash** (string): SHA1 checksum as hex string - -```js -SHA1("foobar") // "8843d7f92416211de9ebb963ff4ce28125932878" -``` - -SHA512() --------- - -`SHA512(text) → hash` - -Calculate the SHA512 checksum for *text* and returns it in a hexadecimal -string representation. - -- **text** (string): a string -- returns **hash** (string): SHA512 checksum as hex string - -```js -SHA512("foobar") // "0a50261ebd1a390fed2bf326f2673c145582a6342d523204973d0219337f81616a8069b012587cf5635f6925f1b56c360230c19b273500ee013e030601bf2425" -``` - -SPLIT() -------- - -`SPLIT(value, separator, limit) → strArray` - -Split the given string *value* into a list of strings, using the *separator*. - -- **value** (string): a string -- **separator** (string): either a string or a list of strings. If *separator* is - an empty string, *value* will be split into a list of characters. If no *separator* - is specified, *value* will be returned as array. -- **limit** (number, *optional*): limit the number of split values in the result. - If no *limit* is given, the number of splits returned is not bounded. -- returns **strArray** (array): an array of strings - -```js -SPLIT( "foo-bar-baz", "-" ) // [ "foo", "bar", "baz" ] -SPLIT( "foo-bar-baz", "-", 1 ) // [ "foo" ] -SPLIT( "foo, bar & baz", [ ", ", " & " ] ) // [ "foo", "bar", "baz" ] -``` - -SOUNDEX() ------------ - -`SOUNDEX(value) → soundexString` - -Return the soundex fingerprint of *value*. - -- **value** (string): a string -- returns **soundexString** (string): a soundex fingerprint of *value* - -```js -SOUNDEX( "example" ) // "E251" -SOUNDEX( "ekzampul") // "E251" -SOUNDEX( "soundex" ) // "S532" -SOUNDEX( "sounteks" ) // "S532" -``` - -SUBSTITUTE() ------------- - -`SUBSTITUTE(value, search, replace, limit) → substitutedString` - -Replace search values in the string *value*. - -- **value** (string): a string -- **search** (string|array): if *search* is a string, all occurrences of - *search* will be replaced in *value*. If *search* is an array of strings, - each occurrence of a value contained in *search* will be replaced by the - corresponding array element in *replace*. If *replace* has less list items - than *search*, occurrences of unmapped *search* items will be replaced by an - empty string. -- **replace** (string|array, *optional*): a replacement string, or an array of - strings to replace the corresponding elements of *search* with. Can have less - elements than *search* or be left out to remove matches. If *search* is an array - but *replace* is a string, then all matches will be replaced with *replace*. 
-- **limit** (number, *optional*): cap the number of replacements to this value -- returns **substitutedString** (string): a new string with matches replaced - (or removed) - -```js -SUBSTITUTE( "the quick brown foxx", "quick", "lazy" ) -// "the lazy brown foxx" - -SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], [ "slow", "dog" ] ) -// "the slow brown dog" - -SUBSTITUTE( "the quick brown foxx", [ "the", "foxx" ], [ "that", "dog" ], 1 ) -// "that quick brown foxx" - -SUBSTITUTE( "the quick brown foxx", [ "the", "quick", "foxx" ], [ "A", "VOID!" ] ) -// "A VOID! brown " - -SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], "xx" ) -// "the xx brown xx" -``` - -`SUBSTITUTE(value, mapping, limit) → substitutedString` - -Alternatively, *search* and *replace* can be specified in a combined value. - -- **value** (string): a string -- **mapping** (object): a lookup map with search strings as keys and replacement - strings as values. Empty strings and *null* as values remove matches. - Please note that no sequence of search strings can be warrantied by this; - Means, if you have overlapping search results, one time the first may win, - another time the second. If you need to ensure the precedence of the sequence - choose the array based invocation method. -- **limit** (number, *optional*): cap the number of replacements to this value -- returns **substitutedString** (string): a new string with matches replaced - (or removed) - -```js -SUBSTITUTE("the quick brown foxx", { - "quick": "small", - "brown": "slow", - "foxx": "ant" -}) -// "the small slow ant" - -SUBSTITUTE("the quick brown foxx", { - "quick": "", - "brown": null, - "foxx": "ant" -}) -// "the ant" - -SUBSTITUTE("the quick brown foxx", { - "quick": "small", - "brown": "slow", - "foxx": "ant" -}, 2) -// "the small slow foxx" -``` - -SUBSTRING() ------------ - -`SUBSTRING(value, offset, length) → substring` - -Return a substring of *value*. - -To return the rightmost characters, see [RIGHT()](#right).
-To return the leftmost characters, see [LEFT()](#left). - -- **value** (string): a string -- **offset** (number): start at *offset*, offsets start at position 0 -- **length** (number, *optional*): at most *length* characters, omit to get the - substring from *offset* to the end of the string -- returns **substring** (string): a substring of *value* - -TOKENS() --------- - -`TOKENS(input, analyzer) → strArray` - -Split the *input* string with the help of the specified *analyzer* into a token array. -The resulting array can be used e.g. in subsequent `FILTER` statements with the *IN* operator. -It can help to better understand how the specific analyzer is going to behave. - -- **input** (string): text to tokenize -- **analyzer** (string): one of the available - [ArangoSearch string analyzers](../../Manual/Views/ArangoSearch/Analyzers.html) -- returns **strArray** (array): array of strings, each element being a token - - -TO_BASE64() ------------ - -`TO_BASE64(value) → toBase64String` - -Return the base64 representation of *value*. - -- **value** (string): a string -- returns **toBase64String** (string): a base64 representation of *value* - -TO_HEX() ------------ - -`TO_HEX(value) → toHexString` - -Return the hex representation of *value*. - -- **value** (string): a string -- returns **toHexString** (string): a hex representation of *value* - -TRIM() ------- - -`TRIM(value, type) → strippedString` - -Return the string *value* with whitespace stripped from the start and/or end. - -The optional *type* parameter specifies from which parts of the string the -whitespace is stripped. [LTRIM()](#ltrim) -and [RTRIM()](#rtrim) are preferred -however. - -- **value** (string): a string -- **type** (number, *optional*): strip whitespace from the - - `0` – start and end of the string (default) - - `1` – start of the string only - - `2` – end of the string only - -`TRIM(value, chars) → strippedString` - -Return the string *value* with whitespace stripped from the start and end. - -- **value** (string): a string -- **chars** (string, *optional*): override the characters that should - be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`, - `0x20` and `0x09`). -- returns **strippedString** (string): *value* without *chars* on both sides - -```js -TRIM("foo bar") // "foo bar" -TRIM(" foo bar ") // "foo bar" -TRIM("--==[foo-bar]==--", "-=[]") // "foo-bar" -TRIM(" foobar\t \r\n ") // "foobar" -TRIM(";foo;bar;baz, ", ",; ") // "foo;bar;baz" -``` - -UPPER() -------- - -`UPPER(value) → upperCaseString` - -Convert lower-case letters in *value* to their upper-case counterparts. -All other characters are returned unchanged. - -- **value** (string): a string -- returns **upperCaseString** (string): *value* with lower-case characters converted - to upper-case characters - -UUID() ------- - -`UUID() → UUIDString` - -Return a universally unique identifier value. - -- returns **UUIDString** (string): a universally unique identifier diff --git a/Documentation/Books/AQL/Functions/TypeCast.md b/Documentation/Books/AQL/Functions/TypeCast.md deleted file mode 100644 index 601bf9d82b0a..000000000000 --- a/Documentation/Books/AQL/Functions/TypeCast.md +++ /dev/null @@ -1,273 +0,0 @@ -Type check and cast functions -============================= - -Some operators expect their operands to have a certain data type. For example, -logical operators expect their operands to be boolean values, and the arithmetic -operators expect their operands to be numeric values. 
If an operation is performed -with operands of other types, an automatic conversion to the expected types is -tried. This is called implicit type casting. It helps to avoid query -aborts. - -Type casts can also be performed upon request by invoking a type cast function. -This is called explicit type casting. AQL offers several functions for this. -Each of the these functions takes an operand of any data type and returns a result -value with the type corresponding to the function name. For example, *TO_NUMBER()* -will return a numeric value. - -Type casting functions ----------------------- - -### TO_BOOL() - -`TO_BOOL(value) → bool` - -Take an input *value* of any type and convert it into the appropriate -boolean value. - -- **value** (any): input of arbitrary type -- returns **bool** (boolean): - - *null* is converted to *false* - - Numbers are converted to *true*, except for 0, which is converted to *false* - - Strings are converted to *true* if they are non-empty, and to *false* otherwise - - Arrays are always converted to *true* (even if empty) - - Objects / documents are always converted to *true* - -It's also possible to use double negation to cast to boolean: - -```js -!!1 // true -!!0 // false -!!-0.0 // false -not not 1 // true -!!"non-empty string" // true -!!"" // false -``` - -`TO_BOOL()` is preferred however, because it states the intention clearer. - -### TO_NUMBER() - -`TO_NUMBER(value) → number` - -Take an input *value* of any type and convert it into a numeric value. - -- **value** (any): input of arbitrary type -- returns **number** (number): - - *null* and *false* are converted to the value *0* - - *true* is converted to *1* - - Numbers keep their original value - - Strings are converted to their numeric equivalent if the string contains a - valid representation of a number. Whitespace at the start and end of the string - is allowed. String values that do not contain any valid representation of a number - will be converted to the number *0*. - - An empty array is converted to *0*, an array with one member is converted into the - result of `TO_NUMBER()` for its sole member. An array with two or more members is - converted to the number *0*. - - An object / document is converted to the number *0*. - - A unary plus will also cast to a number, but `TO_NUMBER()` is the preferred way: - ```js -+'5' // 5 -+[8] // 8 -+[8,9] // 0 -+{} // 0 - ``` - A unary minus works likewise, except that a numeric value is also negated: - ```js --'5' // -5 --[8] // -8 --[8,9] // 0 --{} // 0 - ``` - -### TO_STRING() - -`TO_STRING(value) → str` - -Take an input *value* of any type and convert it into a string value. - -- **value** (any): input of arbitrary type -- returns **str** (string): - - *null* is converted to an empty string *""* - - *false* is converted to the string *"false"*, *true* to the string *"true"* - - Numbers are converted to their string representations. This can also be a - scientific notation (e.g. "2e-7") - - Arrays and objects / documents are converted to string representations, - which means JSON-encoded strings with no additional whitespace - -```js -TO_STRING(null) // "" -TO_STRING(true) // "true" -TO_STRING(false) // "false" -TO_STRING(123) // "123" -TO_STRING(+1.23) // "1.23" -TO_STRING(-1.23) // "-1.23" -TO_STRING(0.0000002) // "2e-7" -TO_STRING( [1, 2, 3] ) // "[1,2,3]" -TO_STRING( { foo: "bar", baz: null } ) // "{\"foo\":\"bar\",\"baz\":null}" -``` - -### TO_ARRAY() - -`TO_ARRAY(value) → array` - -Take an input *value* of any type and convert it into an array value. 
- -- **value** (any): input of arbitrary type -- returns **array** (array): - - *null* is converted to an empty array - - Boolean values, numbers and strings are converted to an array containing - the original value as its single element - - Arrays keep their original value - - Objects / documents are converted to an array containing their attribute - **values** as array elements, just like [VALUES()](Document.md#values) - -```js -TO_ARRAY(null) // [] -TO_ARRAY(false) // [false] -TO_ARRAY(true) // [true] -TO_ARRAY(5) // [5] -TO_ARRAY("foo") // ["foo"] -TO_ARRAY([1, 2, "foo"]) // [1, 2, "foo"] -TO_ARRAY({foo: 1, bar: 2, baz: [3, 4, 5]}) // [1, 2, [3, 4, 5]] -``` - -### TO_LIST() - -`TO_LIST(value) → array` - -This is an alias for [TO_ARRAY()](#toarray). - -Type check functions --------------------- - -AQL also offers functions to check the data type of a value at runtime. The -following type check functions are available. Each of these functions takes an -argument of any data type and returns true if the value has the type that is -checked for, and false otherwise. - -### IS_NULL() - -`IS_NULL(value) → bool` - -Check whether *value* is *null*. Identical to `value == null`. - -To test if an attribute exists, see [HAS()](Document.md#has) instead. - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is `null`, - *false* otherwise - -### IS_BOOL() - -`IS_BOOL(value) → bool` - -Check whether *value* is a *boolean* value - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is `true` or `false`, - *false* otherwise - -### IS_NUMBER() - -`IS_NUMBER(value) → bool` - -Check whether *value* is a number - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is a number, - *false* otherwise - -### IS_STRING() - -`IS_STRING(value) → bool` - -Check whether *value* is a string - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is a string, - *false* otherwise - -### IS_ARRAY() - -`IS_ARRAY(value) → bool` - -Check whether *value* is an array / list - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is an array / list, - *false* otherwise - -### IS_LIST() - -`IS_LIST(value) → bool` - -This is an alias for [IS_ARRAY()](#isarray) - -### IS_OBJECT() - -`IS_OBJECT(value) → bool` - -Check whether *value* is an object / document - -- **value** (any): value to test -- returns **bool** (boolean): *true* if *value* is an object / document, - *false* otherwise - -### IS_DOCUMENT() - -`IS_DOCUMENT(value) → bool` - -This is an alias for [IS_OBJECT()](#isobject) - -### IS_DATESTRING() - -`IS_DATESTRING(str) → bool` - -Check whether *value* is a string that can be used in a date function. -This includes partial dates such as *"2015"* or *"2015-10"* and strings -containing properly formatted but invalid dates such as *"2015-02-31"*. - -- **str** (string): date string to test -- returns **bool** (boolean): *true* if *str* is a correctly formatted date string, - *false* otherwise including all non-string values, even if some of them may be usable - in date functions (numeric timestamps) - -### IS_KEY() - -`IS_KEY(str) → bool` - -Check whether *value* is a string that can be used as a -document key, i.e. as the value of the *_key* attribute. -See [Naming Conventions for Document Keys](../../Manual/DataModeling/NamingConventions/DocumentKeys.html). 
- -- **str** (string): document key to test -- returns **bool** (boolean): whether *str* can be used as document key - -### TYPENAME() - -`TYPENAME(value) → typeName` - -Return the data type name of *value*. - -- **value** (any): input of arbitrary type -- returns **typeName** (string): data type name of *value* - (`"null"`, `"bool"`, `"number"`, `"string"`, `"array"` or `"object"`) - -Example Value | Data Type Name ----------------:|--------------- -`null` | `"null"` -`true` | `"bool"` -`false` | `"bool"` -`123` | `"number"` -`-4.56` | `"number"` -`0` | `"number"` -`"foobar"` | `"string"` -`"123"` | `"string"` -`""` | `"string"` -`[ 1, 2, 3 ]` | `"array"` -`["foo",true]` | `"array"` -`[ ]` | `"array"` -`{"foo":"bar"}` | `"object"` -`{"foo": null}` | `"object"` -`{ }` | `"object"` diff --git a/Documentation/Books/AQL/Fundamentals/BindParameters.md b/Documentation/Books/AQL/Fundamentals/BindParameters.md deleted file mode 100644 index 490d5e980e8a..000000000000 --- a/Documentation/Books/AQL/Fundamentals/BindParameters.md +++ /dev/null @@ -1,122 +0,0 @@ -Bind parameters -=============== - -AQL supports the usage of bind parameters, thus allowing to separate the query -text from literal values used in the query. It is good practice to separate the -query text from the literal values because this will prevent (malicious) -injection of keywords and other collection names into an existing query. This -injection would be dangerous because it may change the meaning of an existing -query. - -Using bind parameters, the meaning of an existing query cannot be changed. Bind -parameters can be used everywhere in a query where literals can be used. - -The syntax for bind parameters is *@name* where *@* signifies that this is a -bind parameter and *name* is the actual parameter name. Parameter names must -start with any of the letters *a* to *z* (upper or lower case) or a digit -(*0* to *9*), and can be followed by any letter, digit or the underscore symbol. - -```js -FOR u IN users - FILTER u.id == @id && u.name == @name - RETURN u -``` - -The bind parameter values need to be passed along with the query when it is -executed, but not as part of the query text itself. In the web interface, -there is a pane next to the query editor where the bind parameters can be -entered. When using `db._query()` (in arangosh for instance), then an -object of key-value pairs can be passed for the parameters. Such an object -can also be passed to the HTTP API endpoint `_api/cursor`, as attribute -value for the key *bindVars*: - -```json -{ - "query": "FOR u IN users FILTER u.id == @id && u.name == @name RETURN u", - "bindVars": { - "id": 123, - "name": "John Smith" - } -} -``` - -Bind parameters that are declared in the query must also be passed a parameter -value, or the query will fail. Specifying parameters that are not declared in -the query will result in an error too. - -Bind variables represent a value like a string, and must not be put in quotes -in the AQL code: - -```js -FILTER u.name == "@name" // wrong -FILTER u.name == @name // correct -``` - -If you need to do string processing (concatenation, etc.) in the query, you -need to use [string functions](../Functions/String.md) to do so: - -```js -FOR u IN users - FILTER u.id == CONCAT('prefix', @id, 'suffix') && u.name == @name - RETURN u -``` - -Bind paramers can be used for both, the dot notation as well as the square -bracket notation for sub-attribute access. 
They can also be chained: - -```js -LET doc = { foo: { bar: "baz" } } - -RETURN doc.@attr.@subattr -// or -RETURN doc[@attr][@subattr] -``` - -```json -{ - "attr": "foo", - "subattr": "bar" -} -``` - -Both variants in above example return `[ "baz" ]` as query result. - -The whole attribute path, for highly nested data in particular, can also be -specified using the dot notation and a single bind parameter, by passing an -array of strings as parameter value. The elements of the array represent the -attribute keys of the path: - -```js -LET doc = { a: { b: { c: 1 } } } -RETURN doc.@attr -``` - -```json -{ "attr": [ "a", "b", "c" ] } -``` - -The example query returns `[ 1 ]` as result. Note that `{ "attr": "a.b.c" }` -would return the value of an attribute called *a.b.c*, not the value of -attribute *c* with the parents *a* and *b* as `[ "a", "b", "c" ]` would. - -A special type of bind parameter exists for injecting collection names. This -type of bind parameter has a name prefixed with an additional *@* symbol (thus -when using the bind parameter in a query, two *@* symbols must be used). - -```js -FOR u IN @@collection - FILTER u.active == true - RETURN u -``` - -```json -{ "@collection": "myCollection" } -``` - -Keywords can't be replaced by bind-values; i.e. `FOR`, `FILTER`, `IN`, `INBOUND` or function calls. - -Specific information about parameters binding can also be found in: - -- [AQL with Web Interface](../Invocation/WithWebInterface.md) -- [AQL with Arangosh](../Invocation/WithArangosh.md) -- [HTTP Interface for AQL Queries](../../HTTP/AqlQueryCursor/index.html) diff --git a/Documentation/Books/AQL/Fundamentals/DataTypes.md b/Documentation/Books/AQL/Fundamentals/DataTypes.md deleted file mode 100644 index 9651d35f44b9..000000000000 --- a/Documentation/Books/AQL/Fundamentals/DataTypes.md +++ /dev/null @@ -1,263 +0,0 @@ -Data types -========== - -AQL supports both *primitive* data types consisting of exactly one value and -*compound* data types comprised of multiple values. The following types are -available: - -| Data type | Description | -|------------:|-------------| -| **null** | An empty value, also: the absence of a value -| **boolean** | Boolean truth value with possible values *false* and *true* -| **number** | Signed (real) number -| **string** | UTF-8 encoded text value -| **array** / list | Sequence of values, referred to by their positions -| **object** / document | Sequence of values, referred to by their names - -Primitive types ---------------- - -### Null value - -A `null` value can be used to represent an empty or absent value. -It is different from a numerical value of zero (`null != 0`) and other -*falsy* values (`false`, zero-length string, empty array or object). -It is also known as *nil* or *None* in other languages. - -The system may return `null` in the absence of value, for example -if you call a [function](../Functions/README.md) with unsupported values -as arguments or if you try to [access an attribute](DocumentData.md) -which does not exist. - -### Boolean data type - -The Boolean data type has two possible values, `true` and `false`. -They represent the two truth values in logic and mathematics. - -### Numeric literals - -Numeric literals can be integers or real values (floating-point numbers). -They can optionally be signed with the `+` or `-` symbols. -A decimal point `.` is used as separator for the optional fractional part. -The scientific notation (*E-notation*) is also supported. 
- -``` - 1 - +1 - 42 - -1 --42 - 1.23 --99.99 - 0.5 - .5 - -4.87e103 - -4.87E103 -``` - -The following notations are invalid and will throw a syntax error: - -``` - 1. -01.23 -00.23 -00 -``` - -All numeric values are treated as 64-bit double-precision values internally. -The internal format used is IEEE 754. - -### String literals - -String literals must be enclosed in single or double quotes. If the used quote -character is to be used itself within the string literal, it must be escaped -using the backslash symbol. A literal backslash also needs to be escaped with -a backslash. - -``` -"yikes!" -"don't know" -"this is a \"quoted\" word" -"this is a longer string." -"the path separator on Windows is \\" - -'yikes!' -'don\'t know' -'this is a "quoted" word' -'this is a longer string.' -'the path separator on Windows is \\' -``` - -All string literals must be UTF-8 encoded. It is currently not possible to use -arbitrary binary data if it is not UTF-8 encoded. A workaround to use binary -data is to encode the data using [Base64](https://en.wikipedia.org/wiki/Base64) -or other algorithms on the application -side before storing, and decoding it on application side after retrieval. - -Compound types --------------- - -AQL supports two compound types: - -- **array**: A composition of unnamed values, each accessible - by their positions. Sometimes called *list*. -- **object**: A composition of named values, each accessible - by their names. A *document* is an object at the top level. - -### Arrays / Lists - -The first supported compound type is the array type. Arrays are effectively -sequences of (unnamed / anonymous) values. Individual array elements can be -accessed by their positions. The order of elements in an array is important. - -An *array declaration* starts with a left square bracket `[` and ends with -a right square bracket `]`. The declaration contains zero, one or more -*expression*s, separated from each other with the comma `,` symbol. -Whitespace around elements is ignored in the declaration, thus line breaks, -tab stops and blanks can be used for formatting. - -In the easiest case, an array is empty and thus looks like: - -```json -[ ] -``` - -Array elements can be any legal *expression* values. Nesting of arrays is -supported. - -```json -[ true ] -[ 1, 2, 3 ] -[ -99, "yikes!", [ false, ["no"], [] ], 1 ] -[ [ "fox", "marshal" ] ] -``` - -Individual array values can later be accessed by their positions using the `[]` -accessor. The position of the accessed element must be a numeric -value. Positions start at 0. It is also possible to use negative index values -to access array values starting from the end of the array. This is convenient if -the length of the array is unknown and access to elements at the end of the array -is required. - -```js -// access 1st array element (elements start at index 0) -u.friends[0] - -// access 3rd array element -u.friends[2] - -// access last array element -u.friends[-1] - -// access second to last array element -u.friends[-2] -``` - -### Objects / Documents - -The other supported compound type is the object (or document) type. Objects are a -composition of zero to many attributes. Each attribute is a name/value pair. -Object attributes can be accessed individually by their names. This data type is -also known as dictionary, map, associative array and other names. - -Object declarations start with a left curly bracket `{` and end with a -right curly bracket `}`. 
An object contains zero to many attribute declarations, -separated from each other with the `,` symbol. Whitespace around elements is ignored -in the declaration, thus line breaks, tab stops and blanks can be used for formatting. - -In the simplest case, an object is empty. Its declaration would then be: - -```json -{ } -``` - -Each attribute in an object is a name/value pair. Name and value of an -attribute are separated using the colon `:` symbol. The name is always a string, -whereas the value can be of any type including sub-objects. - -The attribute name is mandatory - there can't be anonymous values in an object. -It can be specified as a quoted or unquoted string: - -```js -{ name: … } // unquoted -{ 'name': … } // quoted (apostrophe / "single quote mark") -{ "name": … } // quoted (quotation mark / "double quote mark") -``` - -It must be quoted if it contains whitespace, escape sequences or characters -other than ASCII letters (`a`-`z`, `A`-`Z`), digits (`0`-`9`), -underscores (`_`) and dollar signs (`$`). The first character has to be a -letter, underscore or dollar sign. - -If a [keyword](../Fundamentals/Syntax.md#keywords) is used as an attribute name -then the attribute name must be quoted or escaped by ticks or backticks: - -```js -{ return: … } // error, return is a keyword! -{ 'return': … } // quoted -{ "return": … } // quoted -{ `return`: … } // escaped (backticks) -{ ´return´: … } // escaped (ticks) -``` - -Attribute names can be computed using dynamic expressions, too. -To disambiguate regular attribute names from attribute name expressions, -computed attribute names must be enclosed in square brackets `[ … ]`: - -```js -{ [ CONCAT("test/", "bar") ] : "someValue" } -``` - -There is also shorthand notation for attributes which is handy for -returning existing variables easily: - -```js -LET name = "Peter" -LET age = 42 -RETURN { name, age } -``` - -The above is the shorthand equivalent for the generic form: - -```js -LET name = "Peter" -LET age = 42 -RETURN { name: name, age: age } -``` - -Any valid expression can be used as an attribute value. That also means nested -objects can be used as attribute values: - -```js -{ name : "Peter" } -{ "name" : "Vanessa", "age" : 15 } -{ "name" : "John", likes : [ "Swimming", "Skiing" ], "address" : { "street" : "Cucumber lane", "zip" : "94242" } } -``` - -Individual object attributes can later be accessed by their names using the -dot `.` accessor: - -```js -u.address.city.name -u.friends[0].name.first -``` - -Attributes can also be accessed using the square bracket `[]` accessor: - -```js -u["address"]["city"]["name"] -u["friends"][0]["name"]["first"] -``` - -In contrast to the dot accessor, the square brackets allow for expressions: - -```js -LET attr1 = "friends" -LET attr2 = "name" -u[attr1][0][attr2][ CONCAT("fir", "st") ] -``` - -{% hint 'info' %} -If a non-existing attribute is accessed in one or the other way, -the result will be `null`, without error or warning. -{% endhint %} diff --git a/Documentation/Books/AQL/Fundamentals/DocumentData.md b/Documentation/Books/AQL/Fundamentals/DocumentData.md deleted file mode 100644 index e199f5a3adea..000000000000 --- a/Documentation/Books/AQL/Fundamentals/DocumentData.md +++ /dev/null @@ -1,43 +0,0 @@ -Accessing data from collections -=============================== - -Collection data can be accessed by specifying a collection name in a query. A -collection can be understood as an array of documents, and that is how they are -treated in AQL. 
Documents from collections are normally accessed using the -*FOR* keyword. Note that when iterating over documents from a collection, the -order of documents is undefined. To traverse documents in an explicit and -deterministic order, the *SORT* keyword should be used in addition. - -Data in collections is stored in documents, with each document potentially -having different attributes than other documents. This is true even for -documents of the same collection. - -It is therefore quite normal to encounter documents that do not have some or all -of the attributes that are queried in an AQL query. In this case, the -non-existing attributes in the document will be treated as if they would exist -with a value of *null*. That means that comparing a document attribute to -*null* will return true if the document has the particular attribute and the -attribute has a value of *null*, or that the document does not have the -particular attribute at all. - -For example, the following query will return all documents from the collection -*users* that have a value of *null* in the attribute *name*, plus all documents -from *users* that do not have the *name* attribute at all: - - FOR u IN users - FILTER u.name == null - RETURN u - -Furthermore, *null* is less than any other value (excluding *null* itself). That -means documents with non-existing attributes may be included in the result -when comparing attribute values with the less than or less equal operators. - -For example, the following query will return all documents from the collection -*users* that have an attribute *age* with a value less than *39*, but also all -documents from the collection that do not have the attribute *age* at all. - - FOR u IN users - FILTER u.age < 39 - RETURN u - -This behavior should always be taken into account when writing queries. diff --git a/Documentation/Books/AQL/Fundamentals/QueryErrors.md b/Documentation/Books/AQL/Fundamentals/QueryErrors.md deleted file mode 100644 index d46ad014dab8..000000000000 --- a/Documentation/Books/AQL/Fundamentals/QueryErrors.md +++ /dev/null @@ -1,32 +0,0 @@ -Errors -====== - -Issuing an invalid query to the server will result in a parse error if the query -is syntactically invalid. ArangoDB will detect such errors during query -inspection and abort further processing. Instead, the error number and an error -message are returned so that the errors can be fixed. - -If a query passes the parsing stage, all collections referenced in the query -will be opened. If any of the referenced collections is not present, query -execution will again be aborted and an appropriate error message will be -returned. - -Under some circumstances, executing a query may also produce run-time errors -that cannot be predicted from inspecting the query text alone. This is because -queries may use data from collections that may also be inhomogeneous. Some -examples that will cause run-time errors are: - -- Division by zero: Will be triggered when an attempt is made to use the value - *0* as the divisor in an arithmetic division or modulus operation -- Invalid operands for arithmetic operations: Will be triggered when an attempt - is made to use any non-numeric values as operands in arithmetic operations. - This includes unary (unary minus, unary plus) and binary operations (plus, - minus, multiplication, division, and modulus) -- Invalid operands for logical operations: Will be triggered when an attempt is - made to use any non-boolean values as operand(s) in logical operations. 
This - includes unary (logical not/negation), binary (logical and, logical or), and - the ternary operators - -Please refer to the [Arango Errors](../../Manual/Appendix/ErrorCodes.html) page -for a list of error codes and meanings. - diff --git a/Documentation/Books/AQL/Fundamentals/QueryResults.md b/Documentation/Books/AQL/Fundamentals/QueryResults.md deleted file mode 100644 index f86a5a7c25c6..000000000000 --- a/Documentation/Books/AQL/Fundamentals/QueryResults.md +++ /dev/null @@ -1,59 +0,0 @@ -Query results -============= - -Result sets ------------ - -The result of an AQL query is an array of values. The individual values in the -result array may or may not have a homogeneous structure, depending on what is -actually queried. - -For example, when returning data from a collection with inhomogeneous documents -(the individual documents in the collection have different attribute names) -without modification, the result values will as well have an inhomogeneous -structure. Each result value itself is a document: - -```js -FOR u IN users - RETURN u -``` - -```json -[ { "id": 1, "name": "John", "active": false }, - { "age": 32, "id": 2, "name": "Vanessa" }, - { "friends": [ "John", "Vanessa" ], "id": 3, "name": "Amy" } ] -``` - -However, if a fixed set of attributes from the collection is queried, then the -query result values will have a homogeneous structure. Each result value is -still a document: - -```js -FOR u IN users - RETURN { "id": u.id, "name": u.name } -``` - -```json -[ { "id": 1, "name": "John" }, - { "id": 2, "name": "Vanessa" }, - { "id": 3, "name": "Amy" } ] -``` - -It is also possible to query just scalar values. In this case, the result set -is an array of scalars, and each result value is a scalar value: - -```js -FOR u IN users - RETURN u.id -``` - -```json -[ 1, 2, 3 ] -``` - -If a query does not produce any results because no matching data can be -found, it will produce an empty result array: - -```json -[ ] -``` diff --git a/Documentation/Books/AQL/Fundamentals/README.md b/Documentation/Books/AQL/Fundamentals/README.md deleted file mode 100644 index 32d3fe05f2e9..000000000000 --- a/Documentation/Books/AQL/Fundamentals/README.md +++ /dev/null @@ -1,14 +0,0 @@ -AQL Fundamentals -================ - - -* [AQL Syntax](Syntax.md) explains the structure of the AQL language. -* [Data Types](DataTypes.md) describes the primitive and compound data types supported by AQL. -* [Bind Parameters](BindParameters.md): AQL supports the usage of bind parameters. This allows to separate the query text from literal values used in the query. -* [Type and value order](TypeValueOrder.md): AQL uses a set of rules (using values and types) for equality checks and comparisons. -* [Accessing Data from Collections](DocumentData.md): describes the impact of non-existent or null attributes for selection queries. -* [Query Results](QueryResults.md): the result of an AQL query is an array of values. -* [Query Errors](QueryErrors.md): errors may arise from the AQL parsing or execution. - -Learn more about optimizing your queries by going through the -[Performance Course](https://www.arangodb.com/arangodb-performance-course/). 
diff --git a/Documentation/Books/AQL/Fundamentals/Syntax.md b/Documentation/Books/AQL/Fundamentals/Syntax.md deleted file mode 100644 index 8e675c3d42ae..000000000000 --- a/Documentation/Books/AQL/Fundamentals/Syntax.md +++ /dev/null @@ -1,223 +0,0 @@ -AQL Syntax -========== - -Query types ------------ - -An AQL query must either return a result (indicated by usage of the *RETURN* -keyword) or execute a data-modification operation (indicated by usage -of one of the keywords *INSERT*, *UPDATE*, *REPLACE*, *REMOVE* or *UPSERT*). The AQL -parser will return an error if it detects more than one data-modification -operation in the same query or if it cannot figure out if the query is meant -to be a data retrieval or a modification operation. - -AQL only allows *one* query in a single query string; thus semicolons to -indicate the end of one query and separate multiple queries (as seen in SQL) are -not allowed. - -Whitespace ----------- - -Whitespaces (blanks, carriage returns, line feeds, and tab stops) can be used -in the query text to increase its readability. Tokens have to be separated by -any number of whitespaces. Whitespace within strings or names must be enclosed -in quotes in order to be preserved. - -Comments --------- - -Comments can be embedded at any position in a query. The text contained in the -comment is ignored by the AQL parser. - -Multi-line comments cannot be nested, which means subsequent comment starts within -comments are ignored, comment ends will end the comment. - -AQL supports two types of comments: - -- Single line comments: These start with a double forward slash and end at - the end of the line, or the end of the query string (whichever is first). -- Multi line comments: These start with a forward slash and asterisk, and - end with an asterisk and a following forward slash. They can span as many - lines as necessary. - - - /* this is a comment */ RETURN 1 - /* these */ RETURN /* are */ 1 /* multiple */ + /* comments */ 1 - /* this is - a multi line - comment */ - // a single line comment - -Keywords --------- - -On the top level, AQL offers the following operations: -- `FOR`: array iteration -- `RETURN`: results projection -- `FILTER`: non-view results filtering -- `SEARCH`: view results filtering -- `SORT`: result sorting -- `LIMIT`: result slicing -- `LET`: variable assignment -- `COLLECT`: result grouping -- `INSERT`: insertion of new documents -- `UPDATE`: (partial) update of existing documents -- `REPLACE`: replacement of existing documents -- `REMOVE`: removal of existing documents -- `UPSERT`: insertion or update of existing documents - -Each of the above operations can be initiated in a query by using a keyword of -the same name. An AQL query can (and typically does) consist of multiple of the -above operations. - -An example AQL query may look like this: - -```js -FOR u IN users - FILTER u.type == "newbie" && u.active == true - RETURN u.name -``` - -In this example query, the terms *FOR*, *FILTER*, and *RETURN* initiate the -higher-level operation according to their name. These terms are also keywords, -meaning that they have a special meaning in the language. - -For example, the query parser will use the keywords to find out which high-level -operations to execute. That also means keywords can only be used at certain -locations in a query. This also makes all keywords reserved words that must not -be used for other purposes than they are intended for. - -For example, it is not possible to use a keyword as a collection or attribute -name. 
If a collection or attribute needs to have the same name as a keyword, the
collection or attribute name must be quoted.

Keywords are case-insensitive, meaning they can be specified in lower, upper, or
mixed case in queries. In this documentation, all keywords are written in upper
case to make them distinguishable from other query parts.

There are a few more keywords in addition to the higher-level operation keywords.
Additional keywords may be added in future versions of ArangoDB.
The complete list of keywords is currently:
- AGGREGATE
- ALL
- AND
- ANY
- ASC
- COLLECT
- DESC
- DISTINCT
- FALSE
- FILTER
- FOR
- GRAPH
- IN
- INBOUND
- INSERT
- INTO
- LET
- LIMIT
- NONE
- NOT
- NULL
- OR
- OUTBOUND
- REMOVE
- REPLACE
- RETURN
- SHORTEST_PATH
- SORT
- TRUE
- UPDATE
- UPSERT
- WITH
- -Names ------ - -In general, names are used to identify objects (collections, attributes, -variables, and functions) in AQL queries. - -The maximum supported length of any name is 64 bytes. Names in AQL are always -case-sensitive. - -Keywords must not be used as names. If a reserved keyword should be used as a -name, the name must be enclosed in backticks or forward ticks. Enclosing a name in -backticks or forward ticks makes it possible to use otherwise reserved keywords -as names. An example for this is: - -```js -FOR f IN `filter` - RETURN f.`sort` -``` - -Due to the backticks, *filter* and *sort* are interpreted as names and not as -keywords here. - -The example can alternatively written as: - -```js -FOR f IN ´filter´ - RETURN f.´sort´ -``` - -### Collection names - -Collection names can be used in queries as they are. If a collection happens to -have the same name as a keyword, the name must be enclosed in backticks. - -Please refer to the [Naming Conventions in ArangoDB](../../Manual/DataModeling/NamingConventions/CollectionAndViewNames.html) -about collection naming conventions. - -AQL currently has a limit of up to 256 collections used in one AQL query. -This limit applies to the sum of all involved document and edge collections. - -### Attribute names - -When referring to attributes of documents from a collection, the fully qualified -attribute name must be used. This is because multiple collections with ambiguous -attribute names may be used in a query. To avoid any ambiguity, it is not -allowed to refer to an unqualified attribute name. - -Please refer to the [Naming Conventions in ArangoDB](../../Manual/DataModeling/NamingConventions/AttributeNames.html) -for more information about the attribute naming conventions. - -```js -FOR u IN users - FOR f IN friends - FILTER u.active == true && f.active == true && u.id == f.userId - RETURN u.name -``` - -In the above example, the attribute names *active*, *name*, *id*, and *userId* -are qualified using the collection names they belong to (*u* and *f* -respectively). - -### Variable names - -AQL allows the user to assign values to additional variables in a query. All -variables that are assigned a value must have a name that is unique within the -context of the query. Variable names must be different from the names of any -collection name used in the same query. - -```js -FOR u IN users - LET friends = u.friends - RETURN { "name" : u.name, "friends" : friends } -``` - -In the above query, *users* is a collection name, and both *u* and *friends* are -variable names. This is because the *FOR* and *LET* operations need target -variables to store their intermediate results. - -Allowed characters in variable names are the letters *a* to *z* (both in lower -and upper case), the numbers *0* to *9*, the underscore (*_*) symbol and the -dollar (*$*) sign. A variable name must not start with a number. If a variable name -starts with the underscore character, the underscore must be followed by least one -letter (a-z or A-Z) or digit (0-9). - -The dollar sign can be used only as the very first character in a variable name. 
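A small sketch illustrating these naming rules (all variable names below are
made up):

```js
LET itemCount = 1     // letters and digits
LET _score2 = 2       // leading underscore followed by a letter or digit
LET $total = 3        // dollar sign only as the very first character
// LET 2fast = 4      // invalid: a variable name must not start with a number
RETURN [ itemCount, _score2, $total ]
```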
diff --git a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md b/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md deleted file mode 100644 index 5af7a7791ed5..000000000000 --- a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md +++ /dev/null @@ -1,125 +0,0 @@ -Type and value order -==================== - -When checking for equality or inequality or when determining the sort order of -values, AQL uses a deterministic algorithm that takes both the data types and -the actual values into account. - -The compared operands are first compared by their data types, and only by their -data values if the operands have the same data types. - -The following type order is used when comparing data types: - - null < bool < number < string < array/list < object/document - -This means *null* is the smallest type in AQL and *document* is the type with -the highest order. If the compared operands have a different type, then the -comparison result is determined and the comparison is finished. - -For example, the boolean *true* value will always be less than any numeric or -string value, any array (even an empty array) or any object / document. Additionally, any -string value (even an empty string) will always be greater than any numeric -value, a boolean value, *true* or *false*. - - null < false - null < true - null < 0 - null < '' - null < ' ' - null < '0' - null < 'abc' - null < [ ] - null < { } - - false < true - false < 0 - false < '' - false < ' ' - false < '0' - false < 'abc' - false < [ ] - false < { } - - true < 0 - true < '' - true < ' ' - true < '0' - true < 'abc' - true < [ ] - true < { } - - 0 < '' - 0 < ' ' - 0 < '0' - 0 < 'abc' - 0 < [ ] - 0 < { } - - '' < ' ' - '' < '0' - '' < 'abc' - '' < [ ] - '' < { } - - [ ] < { } - -If the two compared operands have the same data types, then the operands values -are compared. For the primitive types (null, boolean, number, and string), the -result is defined as follows: - -- null: *null* is equal to *null* -- boolean: *false* is less than *true* -- number: numeric values are ordered by their cardinal value -- string: string values are ordered using a localized comparison, using the configured - [server language](../../Manual/Programs/Arangod/Global.html#default-language) - for sorting according to the alphabetical order rules of that language - -Note: unlike in SQL, *null* can be compared to any value, including *null* -itself, without the result being converted into *null* automatically. - -For compound, types the following special rules are applied: - -Two array values are compared by comparing their individual elements position by -position, starting at the first element. For each position, the element types -are compared first. If the types are not equal, the comparison result is -determined, and the comparison is finished. If the types are equal, then the -values of the two elements are compared. If one of the arrays is finished and -the other array still has an element at a compared position, then *null* will be -used as the element value of the fully traversed array. - -If an array element is itself a compound value (an array or an object / document), then the -comparison algorithm will check the element's sub values recursively. The element's -sub-elements are compared recursively. - - [ ] < [ 0 ] - [ 1 ] < [ 2 ] - [ 1, 2 ] < [ 2 ] - [ 99, 99 ] < [ 100 ] - [ false ] < [ true ] - [ false, 1 ] < [ false, '' ] - -Two object / documents operands are compared by checking attribute names and value. The -attribute names are compared first. 
Before attribute names are compared, a -combined array of all attribute names from both operands is created and sorted -lexicographically. This means that the order in which attributes are declared -in an object / document is not relevant when comparing two objects / documents. - -The combined and sorted array of attribute names is then traversed, and the -respective attributes from the two compared operands are then looked up. If one -of the objects / documents does not have an attribute with the sought name, its attribute -value is considered to be *null*. Finally, the attribute value of both -objects / documents is compared using the before mentioned data type and value comparison. -The comparisons are performed for all object / document attributes until there is an -unambiguous comparison result. If an unambiguous comparison result is found, the -comparison is finished. If there is no unambiguous comparison result, the two -compared objects / documents are considered equal. - - { } < { "a" : 1 } - { } < { "a" : null } - { "a" : 1 } < { "a" : 2 } - { "b" : 1 } < { "a" : 0 } - { "a" : { "c" : true } } < { "a" : { "c" : 0 } } - { "a" : { "c" : true, "a" : 0 } } < { "a" : { "c" : false, "a" : 1 } } - - { "a" : 1, "b" : 2 } == { "b" : 2, "a" : 1 } - diff --git a/Documentation/Books/AQL/Graphs/KShortestPaths.md b/Documentation/Books/AQL/Graphs/KShortestPaths.md deleted file mode 100644 index ecdbdcb923b2..000000000000 --- a/Documentation/Books/AQL/Graphs/KShortestPaths.md +++ /dev/null @@ -1,220 +0,0 @@ -k Shortest Paths in AQL -======================= - -General query idea --------------------- - -This type of query is supposed to find the first *k* paths in order of length -(or weight) between two given documents, *startVertex* and *targetVertex* in -your graph. - -Every such path will be returned as a JSON object with three components: - -- an array containing the `vertices` on the path -- an array containing the `edges` on the path -- the `weight` of the path, that is the sum of all edge weights - -If no *weightAttribute* is given, the weight of the path is just its length. - -**Example** - -Let su take a look at a simple example to explain how it works. -This is the graph that we are going to find some shortest path on: - -![train_map](train_map.png) - -Each ellipse stands for a train station with the name of the city written inside -of it. They are the vertices of the graph. Arrows represent train connections -between cities and are the edges of the graph. The numbers near the arrows -describe how long it takes to get from one station to another. They are used -as edge weights. - -Let us assume that we want to go from **Aberdeen** to **London** by train. - -We expect to see the following vertices on *the* shortest path, in this order: - -1. Aberdeen -2. Leuchars -3. Edinburgh -4. York -5. London - -By the way, the weight of the path is: 1.5 + 1.5 + 3.5 + 1.8 = **8.3**. - -Let us look at alternative paths next, for example because we know that the -direct connection between York and London does not operate currently. -An alternative path, which is slightly longer, goes like this: - -1. Aberdeen -2. Leuchars -3. Edinburgh -4. York -5. **Carlisle** -6. **Birmingham** -7. London - -Its weight is: 1.5 + 1.5 + 3.5 + 2.0 + 1.5 = **10.0**. - -Another route goes via Glasgow. There are seven stations on the path as well, -however, it is quicker if we compare the edge weights: - -1. Aberdeen -2. Leuchars -3. Edinburgh -4. **Glasgow** -5. Carlisle -6. Birmingham -7. 
London - -The path weight is lower: 1.5 + 1.5 + 1.0 + 1.0 + 2.0 + 1.5 = **8.5**. - -Syntax ------- - -The syntax for k Shortest Paths queries is similar to the one for -[Shortest Path](ShortestPath.md) and there are also two options to either -use a named graph or a set of edge collections. It only emits a path -variable however, whereas SHORTEST_PATH emits a vertex and an edge variable. - -{% hint 'warning' %} -It is highly recommended that you use a **LIMIT** statement, as -k Shortest Paths is a potentially expensive operation. On large connected -graphs it can return a large number of paths, or perform an expensive -(but unsuccessful) search for more short paths. -{% endhint %} - -### Working with named graphs - -``` -FOR path - IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS - startVertex TO targetVertex - GRAPH graphName - [OPTIONS options] - [LIMIT offset, count] -``` - -- `FOR`: emits the variable **path** which contains one path as an object containing - `vertices`, `edges`, and the `weight` of the path. -- `IN` `OUTBOUND|INBOUND|ANY`: defines in which direction - edges are followed (outgoing, incoming, or both) -- `K_SHORTEST_PATHS`: the keyword to compute k Shortest Paths -- **startVertex** `TO` **targetVertex** (both string|object): the two vertices between - which the paths will be computed. This can be specified in the form of - a ID string or in the form of a document with the attribute `_id`. All other - values will lead to a warning and an empty result. If one of the specified - documents does not exist, the result is empty as well and there is no warning. -- `GRAPH` **graphName** (string): the name identifying the named graph. Its vertex and - edge collections will be looked up. -- `OPTIONS` **options** (object, *optional*): used to modify the execution of the - traversal. Only the following attributes have an effect, all others are ignored: - - **weightAttribute** (string): a top-level edge attribute that should be used - to read the edge weight. If the attribute does not exist or is not numeric, the - *defaultWeight* will be used instead. - - **defaultWeight** (number): this value will be used as fallback if there is - no *weightAttribute* in the edge document, or if it's not a number. The default - is 1. -- `LIMIT` (see [LIMIT operation](../Operations/Limit.html), *optional*): - the maximal number of paths to return. It is highly recommended to use - a `LIMIT` for `K_SHORTEST_PATHS`. - -### Working with collection sets - -``` -FOR path - IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS - startVertex TO targetVertex - edgeCollection1, ..., edgeCollectionN - [OPTIONS options] - [LIMIT offset, count] -``` - -Instead of `GRAPH graphName` you can specify a list of edge collections. -The involved vertex collections are determined by the edges of the given -edge collections. - -### Traversing in mixed directions - -For k shortest paths with a list of edge collections you can optionally specify the -direction for some of the edge collections. Say for example you have three edge -collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction -has no relevance, but in *edges1* and *edges3* the direction should be taken into -account. 
In this case you can use *OUTBOUND* as general search direction and *ANY* -specifically for *edges2* as follows: - -``` -FOR vertex IN OUTBOUND K_SHORTEST_PATHS - startVertex TO targetVertex - edges1, ANY edges2, edges3 -``` - -All collections in the list that do not specify their own direction will use the -direction defined after `IN` (here: `OUTBOUND`). This allows to use a different -direction for each collection in your path search. - -Examples --------- - -We load an example graph to get a named graph that reflects some possible -train connections in Europe and North America. - -![train_map](train_map.png) - - @startDocuBlockInline GRAPHKSP_01_create_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_01_create_graph} - ~addIgnoreCollection("places"); - ~addIgnoreCollection("connections"); - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("kShortestPathsGraph"); - db.places.toArray(); - db.connections.toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_01_create_graph - -Suppose we want to query a route from **Aberdeen** to **London**, and compare -the outputs of SHORTEST_PATH and K_SHORTEST_PATHS with LIMIT 1. Note that while -SHORTEST_PATH and K_SHORTEST_PATH with LIMIT 1 should return a path of the same -length (or weight), they do not need to return the same path. - - @startDocuBlockInline GRAPHKSP_02_Aberdeen_to_London - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_02_Aberdeen_to_London} - db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' RETURN [v,e]"); - db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 1 RETURN p"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_02_Aberdeen_to_London - -Next, we can ask for more than one option for a route: - - @startDocuBlockInline GRAPHKSP_03_Aberdeen_to_London - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_03_Aberdeen_to_London} - db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_03_Aberdeen_to_London - -If we ask for routes that don't exist we get an empty result: - - @startDocuBlockInline GRAPHKSP_04_Aberdeen_to_Toronto - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_04_Aberdeen_to_Toronto} - db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/Toronto' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_04_Aberdeen_to_Toronto - -We can use the attribute *travelTime* that connections have as edge weights to -take into account which connections are quicker: - - @startDocuBlockInline GRAPHKSP_05_StAndrews_to_Cologne - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_05_StAndrews_to_Cologne} - db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/StAndrews' TO 'places/Cologne' GRAPH 'kShortestPathsGraph' OPTIONS { 'weightAttribute': 'travelTime', defaultWeight: '15'} LIMIT 3 RETURN p"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_05_StAndrews_to_Cologne - -And finally clean up by removing the named graph: - - @startDocuBlockInline GRAPHKSP_99_drop_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_99_drop_graph} - var examples = require("@arangodb/graph-examples/example-graph.js"); - examples.dropGraph("kShortestPathsGraph"); - ~removeIgnoreCollection("places"); - ~removeIgnoreCollection("connections"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHKSP_99_drop_graph diff --git 
a/Documentation/Books/AQL/Graphs/README.md b/Documentation/Books/AQL/Graphs/README.md deleted file mode 100644 index cead9d723cf1..000000000000 --- a/Documentation/Books/AQL/Graphs/README.md +++ /dev/null @@ -1,34 +0,0 @@ -Graphs in AQL -============= - -There are multiple ways to work with [graphs in ArangoDB](../../Manual/Graphs/index.html), -as well as different ways to query your graphs using AQL. - -The two options in managing graphs are to either use - -- named graphs where ArangoDB manages the collections involved in one graph, or -- graph functions on a combination of document and edge collections. - -Named graphs can be defined through the [graph-module](../../Manual/Graphs/GeneralGraphs/index.html) -or via the [web interface](../../Manual/Programs/WebInterface/index.html). -The definition contains the name of the graph, and the vertex and edge collections -involved. Since the management functions are layered on top of simple sets of -document and edge collections, you can also use regular AQL functions to work with them. - -Both variants (named graphs and loosely coupled collection sets a.k.a. anonymous graphs) -are supported by the AQL language constructs for graph querying. These constructs -make full use of optimizations and therefore best performance is to be expected: - -- [AQL Traversals](Traversals.md) to follow edges connected to a start vertex, - up to a variable depth. It can be combined with AQL filter conditions. - -- [AQL Shortest Path](ShortestPath.md) to find the vertices and edges between two - given vertices, with as few hops as possible. - -These types of queries are only useful if you use edge collections and/or graphs in -your data model. - -{% hint 'info' %} -New to graphs? [**Take our free graph course for freshers**](https://www.arangodb.com/arangodb-graph-course/) -and get from zero knowledge to advanced query techniques. -{% endhint %} diff --git a/Documentation/Books/AQL/Graphs/ShortestPath.md b/Documentation/Books/AQL/Graphs/ShortestPath.md deleted file mode 100644 index b814fe32c1cd..000000000000 --- a/Documentation/Books/AQL/Graphs/ShortestPath.md +++ /dev/null @@ -1,170 +0,0 @@ -Shortest Path in AQL -==================== - -General query idea ------------------- - -This type of query is supposed to find the shortest path between two given documents -(*startVertex* and *targetVertex*) in your graph. For all vertices on this shortest -path you will get a result in the form of a set with two items: - -1. The vertex on this path. -2. The edge pointing to it. - -### Example execution - -Let's take a look at a simple example to explain how it works. -This is the graph that we are going to find a shortest path on: - -![traversal graph](traversal_graph.png) - -Now we use the following parameters for our query: - -1. We start at the vertex **A**. -2. We finish with the vertex **D**. - -So obviously we will have the vertices **A**, **B**, **C** and **D** on the -shortest path in exactly this order. Then the shortest path statement will -return the following pairs: - -| Vertex | Edge | -|--------|-------| -| A | null | -| B | A → B | -| C | B → C | -| D | C → D | - -Note: The first edge will always be `null` because there is no edge pointing -to the *startVertex*. - -Syntax ------- - -Now let's see how we can write a shortest path query. -You have two options here: you can either use a named graph or a set of edge -collections (anonymous graph). 
- -### Working with named graphs - -``` -FOR vertex[, edge] - IN OUTBOUND|INBOUND|ANY SHORTEST_PATH - startVertex TO targetVertex - GRAPH graphName - [OPTIONS options] -``` - -- `FOR`: emits up to two variables: - - **vertex** (object): the current vertex on the shortest path - - **edge** (object, *optional*): the edge pointing to the vertex -- `IN` `OUTBOUND|INBOUND|ANY`: defines in which direction edges are followed - (outgoing, incoming, or both) -- **startVertex** `TO` **targetVertex** (both string|object): the two vertices between - which the shortest path will be computed. This can be specified in the form of - an ID string or in the form of a document with the attribute `_id`. All other - values will lead to a warning and an empty result. If one of the specified - documents does not exist, the result is empty as well and there is no warning. -- `GRAPH` **graphName** (string): the name identifying the named graph. Its vertex and - edge collections will be looked up. -- `OPTIONS` **options** (object, *optional*): used to modify the execution of the - traversal. Only the following attributes have an effect, all others are ignored: - - **weightAttribute** (string): a top-level edge attribute that should be used - to read the edge weight. If the attribute is not existent or not numeric, the - *defaultWeight* will be used instead. - - **defaultWeight** (number): this value will be used as fallback if there is - no *weightAttribute* in the edge document, or if it's not a number. The default - is 1. - -### Working with collection sets - -``` -FOR vertex[, edge] - IN OUTBOUND|INBOUND|ANY SHORTEST_PATH - startVertex TO targetVertex - edgeCollection1, ..., edgeCollectionN - [OPTIONS options] -``` - -Instead of `GRAPH graphName` you may specify a list of edge collections (anonymous -graph). The involved vertex collections are determined by the edges of the given -edge collections. The rest of the behavior is similar to the named version. - -### Traversing in mixed directions - -For shortest path with a list of edge collections you can optionally specify the -direction for some of the edge collections. Say for example you have three edge -collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction -has no relevance, but in *edges1* and *edges3* the direction should be taken into -account. In this case you can use *OUTBOUND* as general search direction and *ANY* -specifically for *edges2* as follows: - -``` -FOR vertex IN OUTBOUND SHORTEST_PATH - startVertex TO targetVertex - edges1, ANY edges2, edges3 -``` - -All collections in the list that do not specify their own direction will use the -direction defined after *IN* (here: *OUTBOUND*). This allows to use a different -direction for each collection in your path search. - -Conditional shortest path -------------------------- - -The SHORTEST_PATH computation will only find an unconditioned shortest path. -With this construct it is not possible to define a condition like: "Find the -shortest path where all edges are of type *X*". If you want to do this, use a -normal [Traversal](Traversals.md) instead with the option `{bfs: true}` in -combination with `LIMIT 1`. - -Please also consider [to use `WITH`](../Operations/With.md) to specify the collections you expect to be involved. 
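A sketch of this workaround, assuming the *traversalGraph* data set used in the examples below and a purely hypothetical edge attribute `type` (the depth cap of 10 is also arbitrary): because a breadth-first traversal reports paths in order of increasing length, the first path that passes both filters is a shortest conditional path.

```js
// conditional "shortest path": breadth-first traversal plus LIMIT 1
db._query(`
  WITH circles
  FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
    OPTIONS { bfs: true }
    FILTER p.edges[*].type ALL == 'X'   // condition on every edge of the path
    FILTER v._key == 'D'                // only paths that actually reach the target
    LIMIT 1                             // breadth-first order: first hit is a shortest one
    RETURN p
`).toArray();
```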
- -Examples --------- -We will create a simple symmetric traversal demonstration graph: - -![traversal graph](traversal_graph.png) - - @startDocuBlockInline GRAPHSP_01_create_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_01_create_graph} - ~addIgnoreCollection("circles"); - ~addIgnoreCollection("edges"); - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("traversalGraph"); - db.circles.toArray(); - db.edges.toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHSP_01_create_graph - -We start with the shortest path from **A** to **D** as above: - - @startDocuBlockInline GRAPHSP_02_A_to_D - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_02_A_to_D} - db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' GRAPH 'traversalGraph' RETURN [v._key, e._key]"); - db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' edges RETURN [v._key, e._key]"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHSP_02_A_to_D - -We can see our expectations are fulfilled. We find the vertices in the correct ordering and -the first edge is *null*, because no edge is pointing to the start vertex on this path. - -We can also compute shortest paths based on documents found in collections: - - @startDocuBlockInline GRAPHSP_03_A_to_D - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_03_A_to_D} - db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d GRAPH 'traversalGraph' RETURN [v._key, e._key]"); - db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d edges RETURN [v._key, e._key]"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHSP_03_A_to_D - - -And finally clean it up again: - - @startDocuBlockInline GRAPHSP_99_drop_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_99_drop_graph} - var examples = require("@arangodb/graph-examples/example-graph.js"); - examples.dropGraph("traversalGraph"); - ~removeIgnoreCollection("circles"); - ~removeIgnoreCollection("edges"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHSP_99_drop_graph diff --git a/Documentation/Books/AQL/Graphs/Traversals.md b/Documentation/Books/AQL/Graphs/Traversals.md deleted file mode 100644 index 58435dc61a85..000000000000 --- a/Documentation/Books/AQL/Graphs/Traversals.md +++ /dev/null @@ -1,543 +0,0 @@ -Graph traversals in AQL -======================= - -Syntax ------- - -There are two slightly different syntaxes for traversals in AQL, one for -- [named graphs](../../Manual/Graphs/index.html#named-graphs) and another to -- specify a [set of edge collections](#working-with-collection-sets) - ([anonymous graph](../../Manual/Graphs/index.html#anonymous-graphs)). - -### Working with named graphs - -``` -[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]] -FOR vertex[, edge[, path]] - IN [min[..max]] - OUTBOUND|INBOUND|ANY startVertex - GRAPH graphName - [PRUNE pruneCondition] - [OPTIONS options] -``` -- `WITH`: optional for single server instances, but required for - [graph traversals in a cluster](#graph-traversals-in-a-cluster). 
- - **collections** (collection, *repeatable*): list of vertex collections that will - be involved in the traversal -- `FOR`: emits up to three variables: - - **vertex** (object): the current vertex in a traversal - - **edge** (object, *optional*): the current edge in a traversal - - **path** (object, *optional*): representation of the current path with - two members: - - `vertices`: an array of all vertices on this path - - `edges`: an array of all edges on this path -- `IN` `min..max`: the minimal and maximal depth for the traversal: - - **min** (number, *optional*): edges and vertices returned by this query will - start at the traversal depth of *min* (thus edges and vertices below will - not be returned). If not specified, it defaults to 1. The minimal - possible value is 0. - - **max** (number, *optional*): up to *max* length paths are traversed. - If omitted, *max* defaults to *min*. Thus only the vertices and edges in - the range of *min* are returned. *max* cannot be specified without *min*. -- `OUTBOUND|INBOUND|ANY`: follow outgoing, incoming, or edges pointing in either - direction in the traversal; please note that this cannot be replaced by a bind parameter. -- **startVertex** (string|object): a vertex where the traversal will originate from. - This can be specified in the form of an ID string or in the form of a document - with the attribute `_id`. All other values will lead to a warning and an empty - result. If the specified document does not exist, the result is empty as well - and there is no warning. -- `GRAPH` **graphName** (string): the name identifying the named graph. - Its vertex and edge collections will be looked up. Note that the graph name - is like a regular string, hence it must be enclosed by quote marks. -- `PRUNE` **condition** (AQL condition, *optional*, since version 3.4.5): - A condition, like in a FILTER statement, which will be evaluated in every step - of the traversal, as early as possible. The semantics of this condition is as follows: - * If the condition evaluates to `true`, this path will be considered as a result. - It might still be post-filtered or ignored due to depth constraints. However, - the search will not continue from this path, namely there will be no - result having this path as a prefix. - For example, take the path `(A) -> (B) -> (C)` starting at `A` with a PRUNE - condition on `B`: `(A)` and `(A) -> (B)` are valid paths, whereas - `(A) -> (B) -> (C)` is not returned because it got pruned on `B`. - * If the condition evaluates to `false`, we will continue our search beyond - this path. - There is only one `PRUNE` condition possible, but it can contain an arbitrary number - of `AND` or `OR` statements. - Also note that you can use the output variables of this traversal in the `PRUNE`, - as well as all variables defined before this traversal statement. -- `OPTIONS` **options** (object, *optional*): used to modify the execution of the - traversal. Only the following attributes have an effect, all others are ignored: - - **bfs** (bool): optionally use the alternative breadth-first traversal algorithm - - true – the traversal will be executed breadth-first. The results will first - contain all vertices at depth 1. Then all vertices at depth 2 and so on. - - false (default) – the traversal will be executed depth-first. It will first - return all paths from *min* depth to *max* depth for one vertex at depth 1. - Then for the next vertex at depth 1 and so on. 
- - **uniqueVertices** (string): optionally ensure vertex uniqueness - - "path" – it is guaranteed that there is no path returned with a duplicate vertex - - "global" – it is guaranteed that each vertex is visited at most once during - the traversal, no matter how many paths lead from the start vertex to this one. - If you start with a `min depth > 1` a vertex that was found before *min* depth - might not be returned at all (it still might be part of a path). **Note:** - Using this configuration the result is not deterministic any more. If there - are multiple paths from *startVertex* to *vertex*, one of those is picked. - It is required to set `bfs: true` because with depth-first search the results - would be unpredictable. - - "none" (default) – no uniqueness check is applied on vertices - - **uniqueEdges** (string): optionally ensure edge uniqueness - - "path" (default) – it is guaranteed that there is no path returned with a - duplicate edge - - "none" – no uniqueness check is applied on edges. **Note:** - Using this configuration the traversal will follow edges in cycles. - -### Working with collection sets - -``` -[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]] -FOR vertex[, edge[, path]] - IN [min[..max]] - OUTBOUND|INBOUND|ANY startVertex - edgeCollection1, ..., edgeCollectionN - [PRUNE pruneCondition] - [OPTIONS options] -``` - -Instead of `GRAPH graphName` you may specify a list of edge collections. Vertex -collections are determined by the edges in the edge collections. The traversal -options are the same as with the [named graph variant](#working-with-named-graphs). - -If the same edge collection is specified multiple times, it will behave as if it -were specified only once. Specifying the same edge collection is only allowed when -the collections do not have conflicting traversal directions. - -ArangoSearch Views cannot be used as edge collections. - -### Traversing in mixed directions - -For traversals with a list of edge collections you can optionally specify the -direction for some of the edge collections. Say for example you have three edge -collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction has -no relevance but in *edges1* and *edges3* the direction should be taken into account. -In this case you can use *OUTBOUND* as general traversal direction and *ANY* -specifically for *edges2* as follows: - -``` -FOR vertex IN OUTBOUND - startVertex - edges1, ANY edges2, edges3 -``` - -All collections in the list that do not specify their own direction will use the -direction defined after `IN`. This allows to use a different direction for each -collection in your traversal. - -### Graph traversals in a cluster - -Due to the nature of graphs, edges may reference vertices from arbitrary -collections. Following the paths can thus involve documents from various -collections and it's not possible to predict which will be visited in a -traversal. Hence, which collections need to be locked can only be determined -at run time. Deadlocks may occur under certain circumstances. - -Please consider to use the [`WITH` statement](../Operations/With.md) to -specify the collections you expect to be involved. - -Using filters and the explainer to extrapolate the costs --------------------------------------------------------- - -All three variables emitted by the traversals might as well be used in filter -statements. 
For some of these filter statements the optimizer can detect that it -is possible to prune paths of traversals earlier, hence filtered results will -not be emitted to the variables in the first place. This may significantly -improve the performance of your query. Whenever a filter is not fulfilled, -the complete set of *vertex*, *edge* and *path* will be skipped. All paths -with a length greater than *max* will never be computed. - -In the current state, `AND` combined filters can be optimized, but `OR` -combined filters cannot. - -The following examples are based on the [traversal graph](../../Manual/Graphs/index.html#the-traversal-graph). - -### Pruning - -Introduced in: v3.4.5 - -Pruning is the easiest variant to formulate conditions to reduce the amount of data -to be checked during a search. So it allows to improve query performance and reduces -the amount of overhead generated by the query. Pruning can be executed on the -vertex, the edge and the path and any variable defined before. -See examples: - - @startDocuBlockInline GRAPHTRAV_graphPruneEdges - @EXAMPLE_AQL{GRAPHTRAV_graphPruneEdges} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE e.theTruth == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphPruneEdges - -This will search until it sees an edge having `theTruth == true`. -The path with this edge will be returned, the search will not -continue after this edge. -Namely all responses either have no edge with `theTruth == true` -or the last edge on the path has `theTruth == true`. - - @startDocuBlockInline GRAPHTRAV_graphPruneVertices - @EXAMPLE_AQL{GRAPHTRAV_graphPruneVertices} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE v._key == 'G' - FILTER v._key == 'G' - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphPruneVertices - -This will search for all paths from the source `circles/A` to the vertex `circles/G`. -This is done with first the PRUNE which makes sure we stop search as soon as we have found -`G` and we will not go beyond `G` and via a loop return to it. -With the second filter, we remove all paths that do not end in `G` namely -all shorter ones that have not been cut out by prune. -Hence the list of all paths from `A` to `G` are returned. - -Note you can also prune as soon as you reach a certain collection with the following -example: - - @startDocuBlockInline GRAPHTRAV_graphPruneCollection - @EXAMPLE_AQL{GRAPHTRAV_graphPruneCollection} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE IS_SAME_COLLECTION('circles', v) - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphPruneCollection - -### Filtering on paths - -Filtering on paths allows for the second most powerful filtering and may have the -second highest impact on performance. Using the path variable you can filter on -specific iteration depths. You can filter for absolute positions in the path -by specifying a positive number (which then qualifies for the optimizations), -or relative positions to the end of the path by specifying a negative number. 
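For instance, a negative position addresses a step counted from the end of each path, here the last edge. This is a hedged sketch that reuses the traversal graph and the edge attribute *label* from the examples in this chapter; as mentioned above, only positive (absolute) positions qualify for the optimizations:

```js
// keep only those paths whose final edge carries the label 'left_blub'
db._query(`
  FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
    FILTER p.edges[-1].label == 'left_blub'
    RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
`).toArray();
```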
- -#### Filtering edges on the path - - - @startDocuBlockInline GRAPHTRAV_graphFilterEdges - @EXAMPLE_AQL{GRAPHTRAV_graphFilterEdges} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].theTruth == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterEdges - - -will filter all paths where the start edge (index 0) has the attribute -*theTruth* equal to *true*. The resulting paths will be up to 5 items long. - -### Filtering vertices on the path - -Similar to filtering the edges on the path you can also filter the vertices: - - @startDocuBlockInline GRAPHTRAV_graphFilterVertices - @EXAMPLE_AQL{GRAPHTRAV_graphFilterVertices} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key == "G" - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterVertices - -#### Combining several filters - -And of course you can combine these filters in any way you like: - - @startDocuBlockInline GRAPHTRAV_graphFilterCombine - @EXAMPLE_AQL{GRAPHTRAV_graphFilterCombine} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].theTruth == true - AND p.edges[1].theFalse == false - FILTER p.vertices[1]._key == "G" - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterCombine - -The query will filter all paths where the first edge has the attribute -*theTruth* equal to *true*, the first vertex is "G" and the second edge has -the attribute *theFalse* equal to *false*. The resulting paths will be up to -5 items long. - -**Note**: Although we have defined a *min* of 1, we will only get results of -depth 2. This is because for all results in depth 1 the second edge does not -exist and hence cannot fulfill the condition here. - -#### Filter on the entire path - -With the help of array comparison operators filters can also be defined -on the entire path, like ALL edges should have theTruth == true: - - @startDocuBlockInline GRAPHTRAV_graphFilterEntirePath - @EXAMPLE_AQL{GRAPHTRAV_graphFilterEntirePath} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth ALL == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterEntirePath - -Or NONE of the edges should have theTruth == true: - - @startDocuBlockInline GRAPHTRAV_graphFilterPathEdges - @EXAMPLE_AQL{GRAPHTRAV_graphFilterPathEdges} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth NONE == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterPathEdges - -Both examples above are recognized by the optimizer and can potentially use other indexes -than the edge index. 
- -It is also possible to define that at least one edge on the path has to fulfill the condition: - - @startDocuBlockInline GRAPHTRAV_graphFilterPathAnyEdge - @EXAMPLE_AQL{GRAPHTRAV_graphFilterPathAnyEdge} - @DATASET{traversalGraph} - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth ANY == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_graphFilterPathAnyEdge - -It is guaranteed that at least one, but potentially more edges fulfill the condition. -All of the above filters can be defined on vertices in the exact same way. - -### Filtering on the path vs. filtering on vertices or edges -Filtering on the path influences the Iteration on your graph. If certain conditions -aren't met, the traversal may stop continuing along this path. - -In contrast filters on vertex or edge only express whether you're interested in the actual value of these -documents. Thus, it influences the list of returned documents (if you return v or e) similar -as specifying a non-null `min` value. If you specify a min value of 2, the traversal over the first -two nodes of these paths has to be executed - you just won't see them in your result array. - -Similar are filters on vertices or edges - the traverser has to walk along these nodes, since -you may be interested in documents further down the path. - -### Examples - -We will create a simple symmetric traversal demonstration graph: - -![traversal graph](traversal_graph.png) - - @startDocuBlockInline GRAPHTRAV_01_create_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHTRAV_01_create_graph} - ~addIgnoreCollection("circles"); - ~addIgnoreCollection("edges"); - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("traversalGraph"); - db.circles.toArray(); - db.edges.toArray(); - print("once you don't need them anymore, clean them up:"); - examples.dropGraph("traversalGraph"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHTRAV_01_create_graph - -To get started we select the full graph. For better overview we only return -the vertex IDs: - - @startDocuBlockInline GRAPHTRAV_02_traverse_all_a - @EXAMPLE_AQL{GRAPHTRAV_02_traverse_all_a} - @DATASET{traversalGraph} - FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_02_traverse_all_a - - @startDocuBlockInline GRAPHTRAV_02_traverse_all_b - @EXAMPLE_AQL{GRAPHTRAV_02_traverse_all_b} - @DATASET{traversalGraph} - FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_02_traverse_all_b - -We can nicely see that it is heading for the first outer vertex, then goes back to -the branch to descend into the next tree. After that it returns to our start node, -to descend again. As we can see both queries return the same result, the first one -uses the named graph, the second uses the edge collections directly. 
- -Now we only want the elements of a specific depth (min = max = 2), the ones that -are right behind the fork: - - @startDocuBlockInline GRAPHTRAV_03_traverse_3a - @EXAMPLE_AQL{GRAPHTRAV_03_traverse_3a} - @DATASET{traversalGraph} - FOR v IN 2..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_03_traverse_3a - - @startDocuBlockInline GRAPHTRAV_03_traverse_3b - @EXAMPLE_AQL{GRAPHTRAV_03_traverse_3b} - @DATASET{traversalGraph} - FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_03_traverse_3b - -As you can see, we can express this in two ways: with or without the *max* parameter -in the expression. - -### Filter examples - -Now let's start to add some filters. We want to cut off the branch on the right -side of the graph. We may filter in two ways: - -- we know the vertex at depth 1 has `_key` == `G` -- we know the `label` attribute of the edge connecting **A** to **G** is `right_foo` - - @startDocuBlockInline GRAPHTRAV_04_traverse_4a - @EXAMPLE_AQL{GRAPHTRAV_04_traverse_4a} - @DATASET{traversalGraph} - FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_04_traverse_4a - - @startDocuBlockInline GRAPHTRAV_04_traverse_4b - @EXAMPLE_AQL{GRAPHTRAV_04_traverse_4b} - @DATASET{traversalGraph} - FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].label != 'right_foo' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_04_traverse_4b - -As we can see, all vertices behind **G** are skipped in both queries. -The first filters on the vertex `_key`, the second on an edge label. -Note again, as soon as a filter is not fulfilled for any of the three elements -`v`, `e` or `p`, the complete set of these will be excluded from the result. - -We also may combine several filters, for instance to filter out the right branch -(**G**), and the **E** branch: - - @startDocuBlockInline GRAPHTRAV_05_traverse_5a - @EXAMPLE_AQL{GRAPHTRAV_05_traverse_5a} - @DATASET{traversalGraph} - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' - FILTER p.edges[1].label != 'left_blub' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_05_traverse_5a - - @startDocuBlockInline GRAPHTRAV_05_traverse_5b - @EXAMPLE_AQL{GRAPHTRAV_05_traverse_5b} - @DATASET{traversalGraph} - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' AND p.edges[1].label != 'left_blub' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_05_traverse_5b - -As you can see, combining two `FILTER` statements with an `AND` has the same result. - -Comparing OUTBOUND / INBOUND / ANY ----------------------------------- - -All our previous examples traversed the graph in *OUTBOUND* edge direction. -However, you may also want to traverse in the reverse direction (*INBOUND*) or -both (*ANY*). 
Since `circles/A` only has outbound edges, we start our queries -from `circles/E`: - - @startDocuBlockInline GRAPHTRAV_06_traverse_6a - @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6a} - @DATASET{traversalGraph} - FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_06_traverse_6a - - @startDocuBlockInline GRAPHTRAV_06_traverse_6b - @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6b} - @DATASET{traversalGraph} - FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_06_traverse_6b - - @startDocuBlockInline GRAPHTRAV_06_traverse_6c - @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6c} - @DATASET{traversalGraph} - FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_06_traverse_6c - -The first traversal will only walk in the forward (*OUTBOUND*) direction. -Therefore from **E** we only can see **F**. Walking in reverse direction -(*INBOUND*), we see the path to **A**: **B** → **A**. - -Walking in forward and reverse direction (*ANY*) we can see a more diverse result. -First of all, we see the simple paths to **F** and **A**. However, these vertices -have edges in other directions and they will be traversed. - -**Note**: The traverser may use identical edges multiple times. For instance, -if it walks from **E** to **F**, it will continue to walk from **F** to **E** -using the same edge once again. Due to this we will see duplicate nodes in the result. - -Please note that the direction can't be passed in by a bind parameter. - -Use the AQL explainer for optimizations ---------------------------------------- - -Now let's have a look what the optimizer does behind the curtain and inspect -traversal queries using [the explainer](../ExecutionAndPerformance/Optimizer.md): - - @startDocuBlockInline GRAPHTRAV_07_traverse_7 - @EXAMPLE_AQL{GRAPHTRAV_07_traverse_7} - @DATASET{traversalGraph} - @EXPLAIN{TRUE} - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - LET localScopeVar = RAND() > 0.5 - FILTER p.edges[0].theTruth != localScopeVar - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_07_traverse_7 - - @startDocuBlockInline GRAPHTRAV_07_traverse_8 - @EXAMPLE_AQL{GRAPHTRAV_07_traverse_8} - @DATASET{traversalGraph} - @EXPLAIN{TRUE} - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].label == 'right_foo' - RETURN v._key - @END_EXAMPLE_AQL - @endDocuBlock GRAPHTRAV_07_traverse_8 - -We now see two queries: In one we add a variable *localScopeVar*, which is outside -the scope of the traversal itself - it is not known inside of the traverser. -Therefore, this filter can only be executed after the traversal, which may be -undesired in large graphs. The second query on the other hand only operates on the -path, and therefore this condition can be used during the execution of the traversal. -Paths that are filtered out by this condition won't be processed at all. 
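To inspect such plans yourself, one option (a sketch, assuming an arangosh session with the example graph loaded) is the `db._explain()` helper, which prints the chosen execution plan so you can check whether a path filter is applied during the traversal:

```js
// print the execution plan for a traversal with a path filter
db._explain(`
  FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
    FILTER p.edges[0].label == 'right_foo'
    RETURN v._key
`);
```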
- -And finally clean it up again: - - @startDocuBlockInline GRAPHTRAV_99_drop_graph - @EXAMPLE_ARANGOSH_OUTPUT{GRAPHTRAV_99_drop_graph} - ~examples.loadGraph("traversalGraph"); - var examples = require("@arangodb/graph-examples/example-graph.js"); - examples.dropGraph("traversalGraph"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock GRAPHTRAV_99_drop_graph - -If this traversal is not powerful enough for your needs, like you cannot describe -your conditions as AQL filter statements, then you might want to have a look at -[manually crafted traversers](../../Manual/Graphs/Traversals/index.html). - -Also see how to [combine graph traversals](../Examples/CombiningGraphTraversals.md). diff --git a/Documentation/Books/AQL/Graphs/TraversalsExplained.md b/Documentation/Books/AQL/Graphs/TraversalsExplained.md deleted file mode 100644 index 4ac6337fe414..000000000000 --- a/Documentation/Books/AQL/Graphs/TraversalsExplained.md +++ /dev/null @@ -1,82 +0,0 @@ -Traversals explained -==================== - -General query idea ------------------- - -A traversal starts at one specific document (*startVertex*) and follows all -edges connected to this document. For all documents (*vertices*) that are -targeted by these edges it will again follow all edges connected to them and -so on. It is possible to define how many of these follow iterations should be -executed at least (*min* depth) and at most (*max* depth). - -For all vertices that were visited during this process in the range between -*min* depth and *max* depth iterations you will get a result in form of a -set with three items: - -1. The visited vertex. -2. The edge pointing to it. -3. The complete path from startVertex to the visited vertex as object with an - attribute *edges* and an attribute *vertices*, each a list of the coresponding - elements. These lists are sorted, which means the first element in *vertices* - is the *startVertex* and the last is the visited vertex, and the n-th element - in *edges* connects the n-th element with the (n+1)-th element in *vertices*. - -Example execution ------------------ - -Let's take a look at a simple example to explain how it works. -This is the graph that we are going to traverse: - -![traversal graph](traversal_graph.png) - -We use the following parameters for our query: - -1. We start at the vertex **A**. -2. We use a *min* depth of 1. -3. We use a *max* depth of 2. -4. We follow only in *OUTBOUND* direction of edges - -![traversal graph step 1](traversal_graph1.png) - -Now it walks to one of the direct neighbors of **A**, say **B** (note: ordering -is not guaranteed!): - -![traversal graph step 2](traversal_graph2.png) - -The query will remember the state (red circle) and will emit the first result -**A** → **B** (black box). This will also prevent the traverser to be trapped -in cycles. Now again it will visit one of the direct neighbors of **B**, say **E**: - -![traversal graph step 3](traversal_graph3.png) - -We have limited the query with a *max* depth of *2*, so it will not pick any -neighbor of **E**, as the path from **A** to **E** already requires *2* steps. -Instead, we will go back one level to **B** and continue with any other direct -neighbor there: - -![traversal graph step 4](traversal_graph4.png) - -Again after we produced this result we will step back to **B**. -But there is no neighbor of **B** left that we have not yet visited. -Hence we go another step back to **A** and continue with any other neighbor there. 
- -![traversal graph step 5](traversal_graph5.png) - -And identical to the iterations before we will visit **H**: - -![traversal graph step 6](traversal_graph6.png) - -And **J**: - -![traversal graph step 7](traversal_graph7.png) - -After these steps there is no further result left. So all together this query -has returned the following paths: - -1. **A** → **B** -2. **A** → **B** → **E** -3. **A** → **B** → **C** -4. **A** → **G** -5. **A** → **G** → **H** -6. **A** → **G** → **J** diff --git a/Documentation/Books/AQL/Graphs/train_map.png b/Documentation/Books/AQL/Graphs/train_map.png deleted file mode 100644 index e4f1c6a0e96f..000000000000 Binary files a/Documentation/Books/AQL/Graphs/train_map.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph.png b/Documentation/Books/AQL/Graphs/traversal_graph.png deleted file mode 100644 index 3d8325bc1519..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph1.png b/Documentation/Books/AQL/Graphs/traversal_graph1.png deleted file mode 100644 index 99f8d232551f..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph1.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph2.png b/Documentation/Books/AQL/Graphs/traversal_graph2.png deleted file mode 100644 index 8bc6984293a5..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph2.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph3.png b/Documentation/Books/AQL/Graphs/traversal_graph3.png deleted file mode 100644 index c71af0fdcc85..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph3.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph4.png b/Documentation/Books/AQL/Graphs/traversal_graph4.png deleted file mode 100644 index b9a62df2b131..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph4.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph5.png b/Documentation/Books/AQL/Graphs/traversal_graph5.png deleted file mode 100644 index 410c6f2e280f..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph5.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph6.png b/Documentation/Books/AQL/Graphs/traversal_graph6.png deleted file mode 100644 index faa0ab98cb5c..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph6.png and /dev/null differ diff --git a/Documentation/Books/AQL/Graphs/traversal_graph7.png b/Documentation/Books/AQL/Graphs/traversal_graph7.png deleted file mode 100644 index ca633dda1057..000000000000 Binary files a/Documentation/Books/AQL/Graphs/traversal_graph7.png and /dev/null differ diff --git a/Documentation/Books/AQL/Invocation/README.md b/Documentation/Books/AQL/Invocation/README.md deleted file mode 100644 index 2328af293126..000000000000 --- a/Documentation/Books/AQL/Invocation/README.md +++ /dev/null @@ -1,27 +0,0 @@ -How to invoke AQL -================= - -AQL queries can be executed using: - -- the web interface, -- the `db` object (either in arangosh or in a Foxx service) -- or the raw HTTP API. - -There are always calls to the server's API under the hood, but the web interface -and the `db` object abstract away the low-level communication details and are -thus easier to use. 
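For instance, a minimal sketch assuming an arangosh session: a query can be run and its result materialized in a single line via the `db` object.

```js
// run an AQL query and fetch all results as a plain JavaScript array
var result = db._query("FOR i IN 1..3 RETURN i * 2").toArray();
// result: [ 2, 4, 6 ]
```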
- -The ArangoDB Web Interface has a [specific tab for AQL queries execution](../Invocation/WithWebInterface.md). - -You can run [AQL queries from the ArangoDB Shell](../Invocation/WithArangosh.md) -with the [_query](WithArangosh.html#with-dbquery) and -[_createStatement](WithArangosh.html#with-createstatement-arangostatement) methods -of the [`db` object](../../Manual/Appendix/References/DBObject.html). This chapter -also describes how to use bind parameters, statistics, counting and cursors with -arangosh. - -If you are using Foxx, see [how to write database queries](../../Manual/Foxx/GettingStarted.html#writing-database-queries) -for examples including tagged template strings. - -If you want to run AQL queries from your application via the HTTP REST API, -see the full API description at [HTTP Interface for AQL Query Cursors](../../HTTP/AqlQueryCursor/index.html). diff --git a/Documentation/Books/AQL/Invocation/WithArangosh.md b/Documentation/Books/AQL/Invocation/WithArangosh.md deleted file mode 100644 index 38c8631a641c..000000000000 --- a/Documentation/Books/AQL/Invocation/WithArangosh.md +++ /dev/null @@ -1,375 +0,0 @@ -Executing queries from Arangosh -=============================== - -Within the ArangoDB shell, the *_query* and *_createStatement* methods of the -*db* object can be used to execute AQL queries. This chapter also describes -how to use bind parameters, counting, statistics and cursors. - -With db._query --------------- - -One can execute queries with the *_query* method of the *db* object. -This will run the specified query in the context of the currently -selected database and return the query results in a cursor. The results of the cursor -can be printed using its *toArray* method: - - @startDocuBlockInline 01_workWithAQL_all - @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_all} - ~addIgnoreCollection("mycollection") - db._create("mycollection") - db.mycollection.save({ _key: "testKey", Hello : "World" }) - db._query('FOR my IN mycollection RETURN my._key').toArray() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 01_workWithAQL_all - -### db._query Bind parameters - -To pass bind parameters into a query, they can be specified as second argument to the -*_query* method: - - @startDocuBlockInline 02_workWithAQL_bindValues - @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_bindValues} - |db._query( - | 'FOR c IN @@collection FILTER c._key == @key RETURN c._key', { - | '@collection': 'mycollection', - | 'key': 'testKey' - }).toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 02_workWithAQL_bindValues - -### ES6 template strings - -It is also possible to use ES6 template strings for generating AQL queries. There is -a template string generator function named *aql*; we call it once to demonstrate -its result, and once putting it directly into the query: - -```js -var key = 'testKey'; -aql`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key`; -{ - "query" : "FOR c IN mycollection FILTER c._key == @value0 RETURN c._key", - "bindVars" : { - "value0" : "testKey" - } -} -``` - - @startDocuBlockInline 02_workWithAQL_aqlQuery - @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_aqlQuery} - var key = 'testKey'; - |db._query( - | aql`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key` - ).toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 02_workWithAQL_aqlQuery - -Arbitrary JavaScript expressions can be used in queries that are generated with the -*aql* template string generator. 
Collection objects are handled automatically: - - @startDocuBlockInline 02_workWithAQL_aqlCollectionQuery - @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_aqlCollectionQuery} - var key = 'testKey'; - |db._query(aql`FOR doc IN ${ db.mycollection } RETURN doc` - ).toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 02_workWithAQL_aqlCollectionQuery - -Note: data-modification AQL queries normally do not return a result (unless the AQL query -contains an extra *RETURN* statement). When not using a *RETURN* statement in the query, the -*toArray* method will return an empty array. - -### Statistics and extra Information - -It is always possible to retrieve statistics for a query with the *getExtra* method: - - @startDocuBlockInline 03_workWithAQL_getExtra - @EXAMPLE_ARANGOSH_OUTPUT{03_workWithAQL_getExtra} - |db._query(`FOR i IN 1..100 - | INSERT { _key: CONCAT('test', TO_STRING(i)) } - | INTO mycollection` - ).getExtra(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 03_workWithAQL_getExtra - -The meaning of the statistics values is described in [Execution statistics](../ExecutionAndPerformance/QueryStatistics.md). -You also will find warnings in here; If you're designing queries on the shell be sure to also look at it. - -### Setting a memory limit - -To set a memory limit for the query, pass *options* to the *_query* method. -The memory limit specifies the maximum number of bytes that the query is -allowed to use. When a single AQL query reaches the specified limit value, -the query will be aborted with a *resource limit exceeded* exception. In a -cluster, the memory accounting is done per shard, so the limit value is -effectively a memory limit per query per shard. - - @startDocuBlockInline 02_workWithAQL_memoryLimit - @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_memoryLimit} - |db._query( - | 'FOR i IN 1..100000 SORT i RETURN i', {}, { - | memoryLimit: 100000 - }).toArray(); // xpError(ERROR_RESOURCE_LIMIT) - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 02_workWithAQL_memoryLimit - -If no memory limit is specified, then the server default value (controlled by -startup option *--query.memory-limit* will be used for restricting the maximum amount -of memory the query can use. A memory limit value of *0* means that the maximum -amount of memory for the query is not restricted. - -### Setting options - -There are further options that can be passed in the *options* attribute of the *_query* method: - -- *failOnWarning*: when set to *true*, this will make the query throw an exception and - abort in case a warning occurs. This option should be used in development to catch - errors early. If set to *false*, warnings will not be propagated to exceptions and - will be returned with the query results. There is also a server configuration option - `--query.fail-on-warning` for setting the default value for *failOnWarning* so it does - not need to be set on a per-query level. - -- *cache*: if set to *true*, this will put the query result into the query result cache - if the query result is eligible for caching and the query cache is running in demand - mode. If set to *false*, the query result will not be inserted into the query result - cache. Note that query results will never be inserted into the query result cache if - the query result cache is disabled, and that they will be automatically inserted into - the query result cache when it is active in non-demand mode. - -- *profile*: if set to *true* or *1*, returns extra timing information for the query. 
The timing - information is accessible via the *getExtra* method of the query result. Set to *2* the query will include execution stats per query plan node in sub-attribute *stats.nodes* of the *extra* return attribute. - Additionally the query plan is returned in the sub-attribute *extra.plan*. - -- *maxWarningCount*: limits the number of warnings that are returned by the query if - *failOnWarning* is not set to *true*. The default value is *10*. - -- *maxNumberOfPlans*: limits the number of query execution plans the optimizer will - create at most. Reducing the number of query execution plans may speed up query plan - creation and optimization for complex queries, but normally there is no need to adjust - this value. - -- *stream*: Specify *true* and the query will be executed in a **streaming** fashion. The query result is - not stored on the server, but calculated on the fly. *Beware*: long-running queries will - need to hold the collection locks for as long as the query cursor exists. It is advisable - to *only* use this option on short-running queries *or* without exclusive locks (write locks on MMFiles). - When set to *false* the query will be executed right away in its entirety. - In that case query results are either returned right away (if the result set is small enough), - or stored on the arangod instance and accessible via the cursor API. - - Please note that the query options `cache`, `count` and `fullCount` will not work on streaming - queries. Additionally query statistics, warnings and profiling data will only be available - after the query is finished. - The default value is *false* - -The following additional attributes can be passed to queries in the RocksDB storage engine: - -- *maxTransactionSize*: transaction size limit in bytes - -- *intermediateCommitSize*: maximum total size of operations after which an intermediate - commit is performed automatically - -- *intermediateCommitCount*: maximum number of operations after which an intermediate - commit is performed automatically - -In the ArangoDB Enterprise Edition there is an additional parameter: - -- *skipInaccessibleCollections* AQL queries (especially graph traversals) will treat - collection to which a user has **no access** rights as if these collections were empty. - Instead of returning a *forbidden access* error, your queries will execute normally. - This is intended to help with certain use-cases: A graph contains several collections - and different users execute AQL queries on that graph. You can now naturally limit the - accessible results by changing the access rights of users on collections. - -With _createStatement (ArangoStatement) ---------------------------------------- - -The *_query* method is a shorthand for creating an ArangoStatement object, -executing it and iterating over the resulting cursor. 
If more control over the -result set iteration is needed, it is recommended to first create an -ArangoStatement object as follows: - - @startDocuBlockInline 04_workWithAQL_statements1 - @EXAMPLE_ARANGOSH_OUTPUT{04_workWithAQL_statements1} - |stmt = db._createStatement( { - "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } ); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 04_workWithAQL_statements1 - -To execute the query, use the *execute* method of the statement: - - @startDocuBlockInline 05_workWithAQL_statements2 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements2} - ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } ); - c = stmt.execute(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements2 - -### Cursors - -Once the query executed the query results are available in a cursor. -The cursor can return all its results at once using the *toArray* method. -This is a short-cut that you can use if you want to access the full result -set without iterating over it yourself. - - @startDocuBlockInline 05_workWithAQL_statements3 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements3} - ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } ); - ~var c = stmt.execute(); - c.toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements3 - - - -Cursors can also be used to iterate over the result set document-by-document. -To do so, use the *hasNext* and *next* methods of the cursor: - - @startDocuBlockInline 05_workWithAQL_statements4 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements4} - ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } ); - ~var c = stmt.execute(); - while (c.hasNext()) { require("@arangodb").print(c.next()); } - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements4 - -Please note that you can iterate over the results of a cursor only once, and that -the cursor will be empty when you have fully iterated over it. To iterate over -the results again, the query needs to be re-executed. - -Additionally, the iteration can be done in a forward-only fashion. There is no -backwards iteration or random access to elements in a cursor. 
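If the same results need to be processed more than once, one option (a sketch, not the only approach) is to drain the cursor into a plain array first and then work with that array:

```js
var stmt = db._createStatement({ "query": "FOR i IN [ 1, 2 ] RETURN i * 2" });
var cursor = stmt.execute();
var results = cursor.toArray();   // consumes the cursor
// cursor.hasNext() is now false; iterate over `results` as often as needed
results.forEach(function (value) { require("@arangodb").print(value); });
```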
- -### ArangoStatement parameters binding - -To execute an AQL query using bind parameters, you need to create a statement first -and then bind the parameters to it before execution: - - @startDocuBlockInline 05_workWithAQL_statements5 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements5} - |var stmt = db._createStatement( { - "query": "FOR i IN [ @one, @two ] RETURN i * 2" } ); - stmt.bind("one", 1); - stmt.bind("two", 2); - c = stmt.execute(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements5 - -The cursor results can then be dumped or iterated over as usual, e.g.: - - @startDocuBlockInline 05_workWithAQL_statements6 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements6} - ~var stmt = db._createStatement( { "query": "FOR i IN [ @one, @two ] RETURN i * 2" } ); - ~stmt.bind("one", 1); - ~stmt.bind("two", 2); - ~var c = stmt.execute(); - c.toArray(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements6 - -or - - @startDocuBlockInline 05_workWithAQL_statements7 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements7} - ~var stmt = db._createStatement( { "query": "FOR i IN [ @one, @two ] RETURN i * 2" } ); - ~stmt.bind("one", 1); - ~stmt.bind("two", 2); - ~var c = stmt.execute(); - while (c.hasNext()) { require("@arangodb").print(c.next()); } - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements7 - -Please note that bind parameters can also be passed into the *_createStatement* method directly, -making it a bit more convenient: - - @startDocuBlockInline 05_workWithAQL_statements8 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements8} - |stmt = db._createStatement( { - | "query": "FOR i IN [ @one, @two ] RETURN i * 2", - | "bindVars": { - | "one": 1, - | "two": 2 - | } - } ); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements8 - -### Counting with a cursor - -Cursors also optionally provide the total number of results. By default, they do not. -To make the server return the total number of results, you may set the *count* attribute to -*true* when creating a statement: - - @startDocuBlockInline 05_workWithAQL_statements9 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements9} - |stmt = db._createStatement( { - | "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", - "count": true } ); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements9 - -After executing this query, you can use the *count* method of the cursor to get the -total number of results from the result set: - - @startDocuBlockInline 05_workWithAQL_statements10 - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements10} - ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", "count": true } ); - var c = stmt.execute(); - c.count(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithAQL_statements10 - -Please note that the *count* method returns nothing if you did not specify the *count* -attribute when creating the query. - -This is intentional so that the server may apply optimizations when executing the query and -construct the result set incrementally. Incremental creation of the result sets -is not possible -if all of the results need to be shipped to the client anyway. Therefore, the client -has the choice to specify *count* and retrieve the total number of results for a query (and -disable potential incremental result set creation on the server), or to not retrieve the total -number of results and allow the server to apply optimizations. 
- -Please note that at the moment the server will always create the full result set for each query so -specifying or omitting the *count* attribute currently does not have any impact on query execution. -This may change in the future. Future versions of ArangoDB may create result sets incrementally -on the server-side and may be able to apply optimizations if a result set is not fully fetched by -a client. - - -### Using cursors to obtain additional information on internal timings - -Cursors can also optionally provide statistics of the internal execution phases. By default, they do not. -To get to know how long parsing, optimization, instantiation and execution took, -make the server return that by setting the *profile* attribute to -*true* when creating a statement: - - @startDocuBlockInline 06_workWithAQL_statements11 - @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements11} - |stmt = db._createStatement( { - | "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", - options: {"profile": true}} ); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 06_workWithAQL_statements11 - -After executing this query, you can use the *getExtra()* method of the cursor to get the -produced statistics: - - @startDocuBlockInline 06_workWithAQL_statements12 - @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements12} - ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", options: {"profile": true}} ); - var c = stmt.execute(); - c.getExtra(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 06_workWithAQL_statements12 - -Query validation ----------------- - -The *_parse* method of the *db* object can be used to parse and validate a -query syntactically, without actually executing it. - - @startDocuBlockInline 06_workWithAQL_statements13 - @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements13} - db._parse( "FOR i IN [ 1, 2 ] RETURN i" ); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 06_workWithAQL_statements13 - diff --git a/Documentation/Books/AQL/Invocation/WithWebInterface.md b/Documentation/Books/AQL/Invocation/WithWebInterface.md deleted file mode 100644 index c764c3697019..000000000000 --- a/Documentation/Books/AQL/Invocation/WithWebInterface.md +++ /dev/null @@ -1,49 +0,0 @@ -AQL with ArangoDB Web Interface -=============================== - - -In the ArangoDB Web Interface the AQL Editor tab allows to execute ad-hoc AQL -queries. - -Type in a query in the main box and execute it by pressing the *Execute* button. -The query result will be shown in another tab. The editor provides a few example -queries that can be used as templates. - -It also provides a feature to explain a query and inspect its execution plan -(with the *Explain* button). - -Bind parameters can be defined in the right-hand side pane. The format is the -same as used for bind parameters in the HTTP REST API and in (JavaScript) -application code. - -Here is an example: - -```js -FOR doc IN @@collection - FILTER CONTAINS(LOWER(doc.author), @search, false) - RETURN { "name": doc.name, "descr": doc.description, "author": doc.author } -``` - -Bind parameters (table view mode): - -| Key | Value | -|-------------|--------| -| @collection | _apps | -| search | arango | - -Bind parameters (JSON view mode): - -```json -{ - "@collection": "_apps", - "search": "arango" -} -``` - -How bind parameters work can be found in [AQL Fundamentals](../Fundamentals/BindParameters.md). - -Queries can also be saved in the AQL editor along with their bind parameter values -for later reuse. 
This data is stored in the user profile in the current database -(in the *_users* system table). - -Also see the detailed description of the [Web Interface](../../Manual/Programs/WebInterface/index.html). diff --git a/Documentation/Books/AQL/Operations/Collect.md b/Documentation/Books/AQL/Operations/Collect.md deleted file mode 100644 index 682427a7c6e4..000000000000 --- a/Documentation/Books/AQL/Operations/Collect.md +++ /dev/null @@ -1,338 +0,0 @@ -COLLECT -======= - -The *COLLECT* keyword can be used to group an array by one or multiple group -criteria. - -The *COLLECT* statement will eliminate all local variables in the current -scope. After *COLLECT* only the variables introduced by *COLLECT* itself are -available. - -The general syntaxes for *COLLECT* are: - -``` -COLLECT variableName = expression options -COLLECT variableName = expression INTO groupsVariable options -COLLECT variableName = expression INTO groupsVariable = projectionExpression options -COLLECT variableName = expression INTO groupsVariable KEEP keepVariable options -COLLECT variableName = expression WITH COUNT INTO countVariable options -COLLECT variableName = expression AGGREGATE variableName = aggregateExpression options -COLLECT AGGREGATE variableName = aggregateExpression options -COLLECT WITH COUNT INTO countVariable options -``` - -`options` is optional in all variants. - -Grouping syntaxes ------------------ - -The first syntax form of *COLLECT* only groups the result by the defined group -criteria specified in *expression*. In order to further process the results -produced by *COLLECT*, a new variable (specified by *variableName*) is introduced. -This variable contains the group value. - -Here's an example query that find the distinct values in *u.city* and makes -them available in variable *city*: - -``` -FOR u IN users - COLLECT city = u.city - RETURN { - "city" : city - } -``` - -The second form does the same as the first form, but additionally introduces a -variable (specified by *groupsVariable*) that contains all elements that fell into the -group. This works as follows: The *groupsVariable* variable is an array containing -as many elements as there are in the group. Each member of that array is -a JSON object in which the value of every variable that is defined in the -AQL query is bound to the corresponding attribute. Note that this considers -all variables that are defined before the *COLLECT* statement, but not those on -the top level (outside of any *FOR*), unless the *COLLECT* statement is itself -on the top level, in which case all variables are taken. Furthermore note -that it is possible that the optimizer moves *LET* statements out of *FOR* -statements to improve performance. - -``` -FOR u IN users - COLLECT city = u.city INTO groups - RETURN { - "city" : city, - "usersInCity" : groups - } -``` - -In the above example, the array *users* will be grouped by the attribute -*city*. The result is a new array of documents, with one element per distinct -*u.city* value. The elements from the original array (here: *users*) per city are -made available in the variable *groups*. This is due to the *INTO* clause. - -*COLLECT* also allows specifying multiple group criteria. 
Individual group -criteria can be separated by commas: - -``` -FOR u IN users - COLLECT country = u.country, city = u.city INTO groups - RETURN { - "country" : country, - "city" : city, - "usersInCity" : groups - } -``` - -In the above example, the array *users* is grouped by country first and then -by city, and for each distinct combination of country and city, the users -will be returned. - - -Discarding obsolete variables ------------------------------ - -The third form of *COLLECT* allows rewriting the contents of the *groupsVariable* -using an arbitrary *projectionExpression*: - -``` -FOR u IN users - COLLECT country = u.country, city = u.city INTO groups = u.name - RETURN { - "country" : country, - "city" : city, - "userNames" : groups - } -``` - -In the above example, only the *projectionExpression* is *u.name*. Therefore, -only this attribute is copied into the *groupsVariable* for each document. -This is probably much more efficient than copying all variables from the scope into -the *groupsVariable* as it would happen without a *projectionExpression*. - -The expression following *INTO* can also be used for arbitrary computations: - -``` -FOR u IN users - COLLECT country = u.country, city = u.city INTO groups = { - "name" : u.name, - "isActive" : u.status == "active" - } - RETURN { - "country" : country, - "city" : city, - "usersInCity" : groups - } -``` - -*COLLECT* also provides an optional *KEEP* clause that can be used to control -which variables will be copied into the variable created by `INTO`. If no -*KEEP* clause is specified, all variables from the scope will be copied as -sub-attributes into the *groupsVariable*. -This is safe but can have a negative impact on performance if there -are many variables in scope or the variables contain massive amounts of data. - -The following example limits the variables that are copied into the *groupsVariable* -to just *name*. The variables *u* and *someCalculation* also present in the scope -will not be copied into *groupsVariable* because they are not listed in the *KEEP* clause: - -``` -FOR u IN users - LET name = u.name - LET someCalculation = u.value1 + u.value2 - COLLECT city = u.city INTO groups KEEP name - RETURN { - "city" : city, - "userNames" : groups[*].name - } -``` - -*KEEP* is only valid in combination with *INTO*. Only valid variable names can -be used in the *KEEP* clause. *KEEP* supports the specification of multiple -variable names. - - -Group length calculation ------------------------- - -*COLLECT* also provides a special *WITH COUNT* clause that can be used to -determine the number of group members efficiently. - -The simplest form just returns the number of items that made it into the -*COLLECT*: - -``` -FOR u IN users - COLLECT WITH COUNT INTO length - RETURN length -``` - -The above is equivalent to, but less efficient than: - -``` -RETURN LENGTH(users) -``` - -The *WITH COUNT* clause can also be used to efficiently count the number -of items in each group: - -``` -FOR u IN users - COLLECT age = u.age WITH COUNT INTO length - RETURN { - "age" : age, - "count" : length - } -``` - -Note: the *WITH COUNT* clause can only be used together with an *INTO* clause. - - -Aggregation ------------ - -A `COLLECT` statement can be used to perform aggregation of data per group. To -only determine group lengths, the `WITH COUNT INTO` variant of `COLLECT` can be -used as described before. 
- -For other aggregations, it is possible to run aggregate functions on the `COLLECT` -results: - -``` -FOR u IN users - COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO g - RETURN { - "ageGroup" : ageGroup, - "minAge" : MIN(g[*].u.age), - "maxAge" : MAX(g[*].u.age) - } -``` - -The above however requires storing all group values during the collect operation for -all groups, which can be inefficient. - -The special `AGGREGATE` variant of `COLLECT` allows building the aggregate values -incrementally during the collect operation, and is therefore often more efficient. - -With the `AGGREGATE` variant the above query becomes: - -``` -FOR u IN users - COLLECT ageGroup = FLOOR(u.age / 5) * 5 - AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age) - RETURN { - ageGroup, - minAge, - maxAge - } -``` - -The `AGGREGATE` keyword can only be used after the `COLLECT` keyword. If used, it -must directly follow the declaration of the grouping keys. If no grouping keys -are used, it must follow the `COLLECT` keyword directly: - - -``` -FOR u IN users - COLLECT AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age) - RETURN { - minAge, - maxAge - } -``` - -Only specific expressions are allowed on the right-hand side of each `AGGREGATE` -assignment: - -- on the top level, an aggregate expression must be a call to one of the supported - aggregation functions `LENGTH`, `MIN`, `MAX`, `SUM`, `AVERAGE`, `STDDEV_POPULATION`, - `STDDEV_SAMPLE`, `VARIANCE_POPULATION`, `VARIANCE_SAMPLE`, `UNIQUE`, `SORTED_UNIQUE` - or `COUNT_DISTINCT`. The following aliases are allowed too: `COUNT` (for `LENGTH`), - `AVG` (for `AVERAGE`), `STDDEV` (for `STDDEV_POPULATION`), `VARIANCE` (for `VARIANCE_POPULATION`), - `COUNT_UNIQUE` (for `COUNT_DISTINCT`). - -- an aggregate expression must not refer to variables introduced by the `COLLECT` itself - - -COLLECT variants ----------------- - -Since ArangoDB 2.6, there are two variants of *COLLECT* that the optimizer can -choose from: the *sorted* variant and the *hash* variant. The *hash* variant only becomes a -candidate for *COLLECT* statements that do not use an *INTO* clause. - -The optimizer will always generate a plan that employs the *sorted* method. The *sorted* method -requires its input to be sorted by the group criteria specified in the *COLLECT* clause. -To ensure correctness of the result, the AQL optimizer will automatically insert a *SORT* -statement into the query in front of the *COLLECT* statement. The optimizer may be able to -optimize away that *SORT* statement later if a sorted index is present on the group criteria. - -In case a *COLLECT* statement qualifies for using the *hash* variant, the optimizer will create an extra -plan for it at the beginning of the planning phase. In this plan, no extra *SORT* statement will be -added in front of the *COLLECT*. This is because the *hash* variant of *COLLECT* does not require -sorted input. Instead, a *SORT* statement will be added after the *COLLECT* to sort its output. -This *SORT* statement may be optimized away again in later stages. -If the sort order of the *COLLECT* is irrelevant to the user, adding the extra instruction *SORT null* -after the *COLLECT* will allow the optimizer to remove the sorts altogether: - -``` -FOR u IN users - COLLECT age = u.age - SORT null /* note: will be optimized away */ - RETURN age -``` - -Which *COLLECT* variant is used by the optimizer depends on the optimizer's cost estimations. The -created plans with the different *COLLECT* variants will be shipped through the regular optimization -pipeline. 
In the end, the optimizer will pick the plan with the lowest estimated total cost as usual. - -In general, the *sorted* variant of *COLLECT* should be preferred in cases when there is a sorted index -present on the group criteria. In this case the optimizer can eliminate the *SORT* statement in front -of the *COLLECT*, so that no *SORT* will be left. - -If there is no sorted index available on the group criteria, the up-front sort required by the *sorted* -variant can be expensive. In this case it is likely that the optimizer will prefer the *hash* variant -of *COLLECT*, which does not require its input to be sorted. - -Which variant of *COLLECT* was actually used can be figured out by looking into the execution plan of -a query, specifically the *AggregateNode* and its *aggregationOptions* attribute. - - -Setting COLLECT options ------------------------ - -*options* can be used in a *COLLECT* statement to inform the optimizer about the preferred *COLLECT* -method. When specifying the following appendix to a *COLLECT* statement, the optimizer will always use -the *sorted* variant of *COLLECT* and not even create a plan using the *hash* variant: - -``` -OPTIONS { method: "sorted" } -``` - -It is also possible to specify *hash* as the preferred method. In this case the optimizer will create -a plan using the *hash* method only if the COLLECT statement qualifies (not all COLLECT statements -can use the *hash* method). In case the COLLECT statement qualifies, there will be only a one plan -that uses the *hash* method. If it does not qualify, the optimizer will use the *sorted* method. - -If no method is specified, then the optimizer will create a plan that uses the *sorted* method, and -an additional plan using the *hash* method if the COLLECT statement qualifies for it. - - -COLLECT vs. RETURN DISTINCT ---------------------------- - -In order to make a result set unique, one can either use *COLLECT* or *RETURN DISTINCT*. Behind the -scenes, both variants will work by creating an *AggregateNode*. For both variants, the optimizer -may try the sorted and the hashed variant of *COLLECT*. The difference is therefore mainly syntactical, -with *RETURN DISTINCT* saving a bit of typing when compared to an equivalent *COLLECT*: - -``` -FOR u IN users - RETURN DISTINCT u.age -``` - -``` -FOR u IN users - COLLECT age = u.age - RETURN age -``` - -However, *COLLECT* is vastly more flexible than *RETURN DISTINCT*. Additionally, the order of results is -undefined for a *RETURN DISTINCT*, whereas for a *COLLECT* the results will be sorted. diff --git a/Documentation/Books/AQL/Operations/Filter.md b/Documentation/Books/AQL/Operations/Filter.md deleted file mode 100644 index 597bcea1c724..000000000000 --- a/Documentation/Books/AQL/Operations/Filter.md +++ /dev/null @@ -1,111 +0,0 @@ -FILTER -====== - -The *FILTER* statement can be used to restrict the results to elements that -match an arbitrary logical condition. - -General syntax --------------- - -``` -FILTER condition -``` - -*condition* must be a condition that evaluates to either *false* or *true*. If -the condition result is false, the current element is skipped, so it will not be -processed further and not be part of the result. If the condition is true, the -current element is not skipped and can be further processed. -See [Operators](../Operators.md) for a list of comparison operators, logical -operators etc. that you can use in conditions. 
-
-```
-FOR u IN users
-  FILTER u.active == true && u.age < 39
-  RETURN u
-```
-
-It is allowed to specify multiple *FILTER* statements in a query, even in
-the same block. If multiple *FILTER* statements are used, their results will be
-combined with a logical AND, meaning all filter conditions must be true to
-include an element.
-
-```
-FOR u IN users
-  FILTER u.active == true
-  FILTER u.age < 39
-  RETURN u
-```
-
-In the above example, all array elements of *users* that have an attribute
-*active* with value *true* and that have an attribute *age* with a value less
-than *39* (including *null* ones) will be included in the result. All other
-elements of *users* will be skipped and not be included in the result produced
-by *RETURN*. You may refer to the chapter [Accessing Data from Collections](../Fundamentals/DocumentData.md)
-for a description of the impact of non-existent or null attributes.
-
-Order of operations
--------------------
-
-Note that the positions of *FILTER* statements can influence the result of a query.
-There are 16 active users in the [test data](../Examples/README.md#example-data)
-for instance:
-
-```js
-FOR u IN users
-  FILTER u.active == true
-  RETURN u
-```
-
-We can limit the result set to 5 users at most:
-
-```js
-FOR u IN users
-  FILTER u.active == true
-  LIMIT 5
-  RETURN u
-```
-
-This may return the user documents of Jim, Diego, Anthony, Michael and Chloe for
-instance. Which ones are returned is undefined, since there is no *SORT* statement
-to ensure a particular order. If we add a second *FILTER* statement to only return
-women...
-
-```js
-FOR u IN users
-  FILTER u.active == true
-  LIMIT 5
-  FILTER u.gender == "f"
-  RETURN u
-```
-
-... it might just return the Chloe document, because the *LIMIT* is applied before
-the second *FILTER*. No more than 5 documents arrive at the second *FILTER* block,
-and not all of them fulfill the gender criterion, even though there are more than
-5 active female users in the collection. A more deterministic result can be achieved
-by adding a *SORT* block:
-
-```js
-FOR u IN users
-  FILTER u.active == true
-  SORT u.age ASC
-  LIMIT 5
-  FILTER u.gender == "f"
-  RETURN u
-```
-
-This will return the users Mariah and Mary. If sorted by age in *DESC* order,
-then the Sophia, Emma and Madison documents are returned. A *FILTER* after a
-*LIMIT* is not very common, however, and you probably want a query like the
-following instead:
-
-```js
-FOR u IN users
-  FILTER u.active == true AND u.gender == "f"
-  SORT u.age ASC
-  LIMIT 5
-  RETURN u
-```
-
-Because the placement of *FILTER* blocks is significant, this single keyword
-can assume the roles of two SQL keywords, *WHERE* as well as *HAVING*.
-AQL's *FILTER* thus works with *COLLECT* aggregates the same as with any other
-intermediate result, document attribute etc.
diff --git a/Documentation/Books/AQL/Operations/For.md b/Documentation/Books/AQL/Operations/For.md
deleted file mode 100644
index 7f739225f5e1..000000000000
--- a/Documentation/Books/AQL/Operations/For.md
+++ /dev/null
@@ -1,110 +0,0 @@
-FOR
-===
-
-
-The *FOR* keyword can be used to iterate over all elements of an array.
-The general syntax is:
-
-```js
-FOR variableName IN expression
-```
-
-There is also a special variant for graph traversals:
-
-```js
-FOR vertexVariableName, edgeVariableName, pathVariableName IN traversalExpression
-```
-
-For this special case see [the graph traversals chapter](../Graphs/Traversals.md).
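-
-To give a rough idea of the traversal variant, here is a small sketch. It assumes a
-vertex collection *persons* and an edge collection *knows* (both example names, not
-part of the test data used elsewhere in this manual):
-
-```js
-FOR v, e, p IN 1..2 OUTBOUND 'persons/alice' knows
-  RETURN { vertex: v.name, edge: e._key, pathLength: LENGTH(p.edges) }
-```
-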
- -For views, there is a special (optional) `SEARCH` keyword: - -```js -FOR variableName IN viewName SEARCH searchExpression -``` - -Details can be found in [the views chapter](../Views/README.md). - - -For all other cases read on: - -Each array element returned by *expression* is visited exactly once. It is -required that *expression* returns an array in all cases. The empty array is -allowed, too. The current array element is made available for further processing -in the variable specified by *variableName*. - -```js -FOR u IN users - RETURN u -``` - -This will iterate over all elements from the array *users* (note: this array -consists of all documents from the collection named "users" in this case) and -make the current array element available in variable *u*. *u* is not modified in -this example but simply pushed into the result using the *RETURN* keyword. - -Note: When iterating over collection-based arrays as shown here, the order of -documents is undefined unless an explicit sort order is defined using a *SORT* -statement. - -The variable introduced by *FOR* is available until the scope the *FOR* is -placed in is closed. - -Another example that uses a statically declared array of values to iterate over: - -```js -FOR year IN [ 2011, 2012, 2013 ] - RETURN { "year" : year, "isLeapYear" : year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) } -``` - -Nesting of multiple *FOR* statements is allowed, too. When *FOR* statements are -nested, a cross product of the array elements returned by the individual *FOR* -statements will be created. - -```js -FOR u IN users - FOR l IN locations - RETURN { "user" : u, "location" : l } -``` - -In this example, there are two array iterations: an outer iteration over the array -*users* plus an inner iteration over the array *locations*. The inner array is -traversed as many times as there are elements in the outer array. For each -iteration, the current values of *users* and *locations* are made available for -further processing in the variable *u* and *l*. - -## Options - -For collections and views, the `FOR` construct supports an optional `OPTIONS` -suffix to modify behavior. The general syntax is: - -```js -FOR variableName IN expression OPTIONS {option: value, ...} -``` - -### Index hints - -For collections, index hints are provided though this inline options mechanism. -Hints can be specified in two different formats. - -The first format option is the simplest, just a single index name. This should -be sufficient for many cases. Whenever there is a choice to potentially use an -index for this `FOR` loop, the optimizer will first check if the specified index -can be used. If so, it will use it, regardless of whether it would normally use -a different index. If it cannot use that index, then it will fall back to its -normal logic to select another index. If the optional `forceIndexHint: true` is -specified, then it will not fall back, and instead generate an error. - -```js -OPTIONS {indexHint: 'byName'[, forceIndexHint: ]} -``` - -The second is an array of index names, in order of preference. When specified -this way, the optimizer will behave much in the same way as above, but will -check the feasibility of each of the specified indices, in the order they are -given, falling back to its normal logic or failing only if none of the specified -indices are feasible. 
- -```js -OPTIONS {indexHint: ['byName', 'byColor'][, forceIndexHint: ]} -``` diff --git a/Documentation/Books/AQL/Operations/Insert.md b/Documentation/Books/AQL/Operations/Insert.md deleted file mode 100644 index 7719945ab542..000000000000 --- a/Documentation/Books/AQL/Operations/Insert.md +++ /dev/null @@ -1,143 +0,0 @@ -INSERT -====== - -The *INSERT* keyword can be used to insert new documents into a collection. On a -single server, an insert operation is executed transactionally in an all-or-nothing -fashion. - -If the RocksDB engine is used and intermediate commits are enabled, a query may -execute intermediate transaction commits in case the running transaction (AQL -query) hits the specified size thresholds. In this case, the query's operations -carried out so far will be committed and not rolled back in case of a later abort/rollback. -That behavior can be controlled by adjusting the intermediate commit settings for -the RocksDB engine. - -For sharded collections, the entire query and/or insert operation may not be transactional, -especially if it involves different shards and/or database servers. - -Each *INSERT* operation is restricted to a single collection, and the -[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic. -Only a single *INSERT* statement per collection is allowed per AQL query, and -it cannot be followed by read or write operations that access the same collection, by -traversal operations, or AQL functions that can read documents. - -The syntax for an insert operation is: - -``` -INSERT document INTO collection [ OPTIONS options ] -``` - -**Note**: The *IN* keyword is allowed in place of *INTO* and has the same meaning. - -*collection* must contain the name of the collection into which the documents should -be inserted. *document* is the document to be inserted, and it may or may not contain -a *_key* attribute. If no *_key* attribute is provided, ArangoDB will auto-generate -a value for *_key* value. Inserting a document will also auto-generate a document -revision number for the document. - -```js -FOR i IN 1..100 - INSERT { value: i } INTO numbers -``` - -An insert operation can also be performed without a *FOR* loop to insert a -single document: - -```js -INSERT { value: 1 } INTO numbers -``` - -When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection), -it is mandatory to specify the attributes *_from* and *_to* in document: - -```js -FOR u IN users - FOR p IN products - FILTER u._key == p.recommendedBy - INSERT { _from: u._id, _to: p._id } INTO recommendations -``` - -Setting query options ---------------------- - -The *OPTIONS* keyword followed by an object with query options can optionally -be provided in an *INSERT* operation. - -It can be used to suppress query errors that may occur when violating unique -key constraints: - -```js -FOR i IN 1..1000 - INSERT { - _key: CONCAT('test', i), - name: "test", - foobar: true - } INTO users OPTIONS { ignoreErrors: true } -``` - -To make sure data are durable when an insert query returns, there is the *waitForSync* -query option: - -```js -FOR i IN 1..1000 - INSERT { - _key: CONCAT('test', i), - name: "test", - foobar: true - } INTO users OPTIONS { waitForSync: true } -``` - -If you want to replace existing documents with documents having the same key -there is the *overwrite* query option. 
This will let you safely replace the -documents instead of raising an "unique constraint violated error": - -```js -FOR i IN 1..1000 - INSERT { - _key: CONCAT('test', i), - name: "test", - foobar: true - } INTO users OPTIONS { overwrite: true } -``` - -In contrast to the MMFiles engine, the RocksDB engine does not require collection-level -locks. Different write operations on the same collection do not block each other, as -long as there are no _write-write conficts_ on the same documents. From an application -development perspective it can be desired to have exclusive write access on collections, -to simplify the development. Note that writes do not block reads in RocksDB. -Exclusive access can also speed up modification queries, because we avoid conflict checks. - -Use the *exclusive* option to achieve this effect on a per query basis: - -```js -FOR doc IN collection - INSERT { myval: doc.val + 1 } INTO users - OPTIONS { exclusive: true } -``` - -Returning the inserted documents --------------------------------- - -The inserted documents can also be returned by the query. In this case, the `INSERT` -statement can be a `RETURN` statement (intermediate `LET` statements are allowed, too). -To refer to the inserted documents, the `INSERT` statement introduces a pseudo-value -named `NEW`. - -The documents contained in `NEW` will contain all attributes, even those auto-generated by -the database (e.g. `_id`, `_key`, `_rev`). - - -```js -INSERT document INTO collection RETURN NEW -``` - -Following is an example using a variable named `inserted` to return the inserted -documents. For each inserted document, the document key is returned: - -```js -FOR i IN 1..100 - INSERT { value: i } - INTO users - LET inserted = NEW - RETURN inserted._key -``` diff --git a/Documentation/Books/AQL/Operations/Let.md b/Documentation/Books/AQL/Operations/Let.md deleted file mode 100644 index 567ca9b85245..000000000000 --- a/Documentation/Books/AQL/Operations/Let.md +++ /dev/null @@ -1,61 +0,0 @@ -LET -=== - -The *LET* statement can be used to assign an arbitrary value to a variable. -The variable is then introduced in the scope the *LET* statement is placed in. - -The general syntax is: - -``` -LET variableName = expression -``` - -Variables are immutable in AQL, which means they can not be re-assigned: - -```js -LET a = [1, 2, 3] // initial assignment - -a = PUSH(a, 4) // syntax error, unexpected identifier -LET a = PUSH(a, 4) // parsing error, variable 'a' is assigned multiple times -LET b = PUSH(a, 4) // allowed, result: [1, 2, 3, 4] -``` - -*LET* statements are mostly used to declare complex computations and to avoid -repeated computations of the same value at multiple parts of a query. - -``` -FOR u IN users - LET numRecommendations = LENGTH(u.recommendations) - RETURN { - "user" : u, - "numRecommendations" : numRecommendations, - "isPowerUser" : numRecommendations >= 10 - } -``` - -In the above example, the computation of the number of recommendations is -factored out using a *LET* statement, thus avoiding computing the value twice in -the *RETURN* statement. - -Another use case for *LET* is to declare a complex computation in a subquery, -making the whole query more readable. 
-
-```
-FOR u IN users
-  LET friends = (
-    FOR f IN friends
-      FILTER u.id == f.userId
-      RETURN f
-  )
-  LET memberships = (
-    FOR m IN memberships
-      FILTER u.id == m.userId
-      RETURN m
-  )
-  RETURN {
-    "user" : u,
-    "friends" : friends,
-    "numFriends" : LENGTH(friends),
-    "memberShips" : memberships
-  }
-```
diff --git a/Documentation/Books/AQL/Operations/Limit.md b/Documentation/Books/AQL/Operations/Limit.md
deleted file mode 100644
index aa461bcbf829..000000000000
--- a/Documentation/Books/AQL/Operations/Limit.md
+++ /dev/null
@@ -1,53 +0,0 @@
-LIMIT
-=====
-
-The *LIMIT* statement allows slicing the result array using an
-offset and a count. It reduces the number of elements in the result to at most
-the specified number. There are two general forms of *LIMIT*:
-
-```js
-LIMIT count
-LIMIT offset, count
-```
-
-The first form allows specifying only the *count* value whereas the second form
-allows specifying both *offset* and *count*. The first form is identical to using
-the second form with an *offset* value of *0*.
-
-```js
-FOR u IN users
-  LIMIT 5
-  RETURN u
-```
-
-The above query returns the first five documents of the *users* collection.
-It could also be written as `LIMIT 0, 5` for the same result.
-Which documents it actually returns is rather arbitrary, however, because no
-explicit sort order is specified. Therefore, a limit should usually be
-accompanied by a `SORT` operation.
-
-The *offset* value specifies how many elements from the result shall be
-skipped. It must be 0 or greater. The *count* value specifies how many
-elements should at most be included in the result.
-
-```js
-FOR u IN users
-  SORT u.firstName, u.lastName, u.id DESC
-  LIMIT 2, 5
-  RETURN u
-```
-
-In the above example, the documents of *users* are sorted, the first two results
-get skipped, and the next five user documents are returned.
-
-Note that variables, expressions and subqueries cannot be used for *offset* and
-*count*. The values for *offset* and *count* must be known at query compile time,
-which means that you can only use number literals, bind parameters or expressions
-that can be resolved at query compile time.
-
-The position of a *LIMIT* relative to other operations in a query matters.
-*LIMIT* operations before *FILTER*s in particular can change the result
-significantly, because the operations are executed in the order in which they
-are written in the query. See [FILTER](Filter.md#order-of-operations) for a
-detailed example.
-
diff --git a/Documentation/Books/AQL/Operations/README.md b/Documentation/Books/AQL/Operations/README.md
deleted file mode 100644
index 4a36190db129..000000000000
--- a/Documentation/Books/AQL/Operations/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-High-level operations
-=====================
-
-The following high-level operations are described hereafter:
-
-* [FOR](For.md): Iterate over all elements of an array.
-* [RETURN](Return.md): Produce the result of a query.
-* [FILTER](Filter.md): Restrict the results to elements that match arbitrary logical conditions.
-* [SORT](Sort.md): Force a sort of the array of already produced intermediate results.
-* [LIMIT](Limit.md): Reduce the number of elements in the result to at most the specified number, optionally skip elements (pagination).
-* [LET](Let.md): Assign an arbitrary value to a variable.
-* [COLLECT](Collect.md): Group an array by one or multiple group criteria. Can also count and aggregate.
-* [REMOVE](Remove.md): Remove documents from a collection.
-* [UPDATE](Update.md): Partially update documents in a collection. -* [REPLACE](Replace.md): Completely replace documents in a collection. -* [INSERT](Insert.md): Insert new documents into a collection. -* [UPSERT](Upsert.md): Update/replace an existing document, or create it in the case it does not exist. -* [WITH](With.md): Specify collections used in a query (at query begin only). diff --git a/Documentation/Books/AQL/Operations/Remove.md b/Documentation/Books/AQL/Operations/Remove.md deleted file mode 100644 index c67e89f5e6d3..000000000000 --- a/Documentation/Books/AQL/Operations/Remove.md +++ /dev/null @@ -1,158 +0,0 @@ - -REMOVE -====== - -The *REMOVE* keyword can be used to remove documents from a collection. On a -single server, the document removal is executed transactionally in an -all-or-nothing fashion. - -If the RocksDB engine is used and intermediate commits are enabled, a query may -execute intermediate transaction commits in case the running transaction (AQL -query) hits the specified size thresholds. In this case, the query's operations -carried out so far will be committed and not rolled back in case of a later abort/rollback. -That behavior can be controlled by adjusting the intermediate commit settings for -the RocksDB engine. - -For sharded collections, the entire query and/or remove operation may not be transactional, -especially if it involves different shards and/or database servers. - -Each *REMOVE* operation is restricted to a single collection, and the -[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic. -Only a single *REMOVE* statement per collection is allowed per AQL query, and -it cannot be followed by read or write operations that access the same collection, by -traversal operations, or AQL functions that can read documents. - -The syntax for a remove operation is: - -``` -REMOVE keyExpression IN collection options -``` - -*collection* must contain the name of the collection to remove the documents -from. *keyExpression* must be an expression that contains the document identification. -This can either be a string (which must then contain the -[document key](../../Manual/Appendix/Glossary.html#document-key)) or a -document, which must contain a *_key* attribute. - -The following queries are thus equivalent: - -``` -FOR u IN users - REMOVE { _key: u._key } IN users - -FOR u IN users - REMOVE u._key IN users - -FOR u IN users - REMOVE u IN users -``` - -**Note**: A remove operation can remove arbitrary documents, and the documents -do not need to be identical to the ones produced by a preceding *FOR* statement: - -``` -FOR i IN 1..1000 - REMOVE { _key: CONCAT('test', i) } IN users - -FOR u IN users - FILTER u.active == false - REMOVE { _key: u._key } IN backup -``` - -A single document can be removed as well, using a document key string or a -document with `_key` attribute: - -``` -REMOVE 'john' IN users -``` - -``` -LET doc = DOCUMENT('users/john') -REMOVE doc IN users -``` - -The restriction of a single remove operation per query and collection -applies. The following query causes an *access after data-modification* -error because of the third remove operation: - -``` -REMOVE 'john' IN users -REMOVE 'john' IN backups // OK, different collection -REMOVE 'mary' IN users // Error, users collection again -``` - -Setting query options ---------------------- - -*options* can be used to suppress query errors that may occur when trying to -remove non-existing documents. 
For example, the following query will fail if one -of the to-be-deleted documents does not exist: - -``` -FOR i IN 1..1000 - REMOVE { _key: CONCAT('test', i) } IN users -``` - -By specifying the *ignoreErrors* query option, these errors can be suppressed so -the query completes: - -``` -FOR i IN 1..1000 - REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { ignoreErrors: true } -``` - -To make sure data has been written to disk when a query returns, there is the *waitForSync* -query option: - -``` -FOR i IN 1..1000 - REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { waitForSync: true } -``` - -In order to not accidentially remove documents that have been updated since you last fetched -them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` values and -only succeed if they still match, or let ArangoDB ignore them (default): - -``` -FOR i IN 1..1000 - REMOVE { _key: CONCAT('test', i), _rev: "1287623" } IN users OPTIONS { ignoreRevs: false } -``` - -In contrast to the MMFiles engine, the RocksDB engine does not require collection-level -locks. Different write operations on the same collection do not block each other, as -long as there are no _write-write conficts_ on the same documents. From an application -development perspective it can be desired to have exclusive write access on collections, -to simplify the development. Note that writes do not block reads in RocksDB. -Exclusive access can also speed up modification queries, because we avoid conflict checks. - -Use the *exclusive* option to achieve this effect on a per query basis: - -```js - FOR doc IN collection - REPLACE doc._key - WITH { replaced: true } - OPTIONS { exclusive: true } -``` - - -Returning the removed documents -------------------------------- - -The removed documents can also be returned by the query. In this case, the `REMOVE` -statement must be followed by a `RETURN` statement (intermediate `LET` statements -are allowed, too).`REMOVE` introduces the pseudo-value `OLD` to refer to the removed -documents: - -``` -REMOVE keyExpression IN collection options RETURN OLD -``` - -Following is an example using a variable named `removed` for capturing the removed -documents. For each removed document, the document key will be returned. - -``` -FOR u IN users - REMOVE u IN users - LET removed = OLD - RETURN removed._key -``` diff --git a/Documentation/Books/AQL/Operations/Replace.md b/Documentation/Books/AQL/Operations/Replace.md deleted file mode 100644 index e782ab48aa94..000000000000 --- a/Documentation/Books/AQL/Operations/Replace.md +++ /dev/null @@ -1,169 +0,0 @@ -REPLACE -======= - -The *REPLACE* keyword can be used to completely replace documents in a collection. On a -single server, the replace operation is executed transactionally in an all-or-nothing -fashion. - -If the RocksDB engine is used and intermediate commits are enabled, a query may -execute intermediate transaction commits in case the running transaction (AQL -query) hits the specified size thresholds. In this case, the query's operations -carried out so far will be committed and not rolled back in case of a later abort/rollback. -That behavior can be controlled by adjusting the intermediate commit settings for -the RocksDB engine. - -For sharded collections, the entire query and/or replace operation may not be transactional, -especially if it involves different shards and/or database servers. 
- -Each *REPLACE* operation is restricted to a single collection, and the -[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic. -Only a single *REPLACE* statement per collection is allowed per AQL query, and -it cannot be followed by read or write operations that access the same collection, by -traversal operations, or AQL functions that can read documents. -The system attributes *_id*, *_key* and *_rev* cannot be replaced, *_from* and *_to* can. - -The two syntaxes for a replace operation are: - -``` -REPLACE document IN collection options -REPLACE keyExpression WITH document IN collection options -``` - -*collection* must contain the name of the collection in which the documents should -be replaced. *document* is the replacement document. When using the first syntax, *document* -must also contain the *_key* attribute to identify the document to be replaced. - -``` -FOR u IN users - REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName), status: u.status } IN users -``` - -The following query is invalid because it does not contain a *_key* attribute and -thus it is not possible to determine the documents to be replaced: - -``` -FOR u IN users - REPLACE { name: CONCAT(u.firstName, u.lastName, status: u.status) } IN users -``` - -When using the second syntax, *keyExpression* provides the document identification. -This can either be a string (which must then contain the document key) or a -document, which must contain a *_key* attribute. - -The following queries are equivalent: - -``` -FOR u IN users - REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName) } IN users - -FOR u IN users - REPLACE u._key WITH { name: CONCAT(u.firstName, u.lastName) } IN users - -FOR u IN users - REPLACE { _key: u._key } WITH { name: CONCAT(u.firstName, u.lastName) } IN users - -FOR u IN users - REPLACE u WITH { name: CONCAT(u.firstName, u.lastName) } IN users -``` - -A replace will fully replace an existing document, but it will not modify the values -of internal attributes (such as *_id*, *_key*, *_from* and *_to*). Replacing a document -will modify a document's revision number with a server-generated value. 
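-
-To illustrate the difference to a partial update, consider the following sketch. It
-assumes a *users* document that also carries a *name* and an *age* attribute; after the
-replace, only the attributes given in the replacement document (plus the system
-attributes) remain:
-
-```js
-// before:  { "_key": "john", "name": "John", "age": 35 }
-REPLACE { _key: "john", status: "inactive" } IN users
-// after:   { "_key": "john", "status": "inactive" }  (the "name" and "age" attributes are gone)
-```
-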
- -A replace operation may update arbitrary documents which do not need to be identical -to the ones produced by a preceding *FOR* statement: - -``` -FOR i IN 1..1000 - REPLACE CONCAT('test', i) WITH { foobar: true } IN users - -FOR u IN users - FILTER u.active == false - REPLACE u WITH { status: 'inactive', name: u.name } IN backup -``` - -Setting query options ---------------------- - -*options* can be used to suppress query errors that may occur when trying to -replace non-existing documents or when violating unique key constraints: - -``` -FOR i IN 1..1000 - REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true } -``` - -To make sure data are durable when a replace query returns, there is the *waitForSync* -query option: - -``` -FOR i IN 1..1000 - REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { waitForSync: true } -``` - -In order to not accidentially overwrite documents that have been updated since you last fetched -them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` value and only -succeed if they still match, or let ArangoDB ignore them (default): - -``` -FOR i IN 1..1000 - REPLACE { _key: CONCAT('test', i), _rev: "1287623" } WITH { foobar: true } IN users OPTIONS { ignoreRevs: false } -``` - - -In contrast to the MMFiles engine, the RocksDB engine does not require collection-level -locks. Different write operations on the same collection do not block each other, as -long as there are no _write-write conficts_ on the same documents. From an application -development perspective it can be desired to have exclusive write access on collections, -to simplify the development. Note that writes do not block reads in RocksDB. -Exclusive access can also speed up modification queries, because we avoid conflict checks. - -Use the *exclusive* option to achieve this effect on a per query basis: - -```js -FOR doc IN collection - REPLACE doc._key - WITH { replaced: true } IN collection - OPTIONS { exclusive: true } -``` - -Returning the modified documents --------------------------------- - -The modified documents can also be returned by the query. In this case, the `REPLACE` -statement must be followed by a `RETURN` statement (intermediate `LET` statements are -allowed, too). The `OLD` pseudo-value can be used to refer to document revisions before -the replace, and `NEW` refers to document revisions after the replace. - -Both `OLD` and `NEW` will contain all document attributes, even those not specified -in the replace expression. - - -``` -REPLACE document IN collection options RETURN OLD -REPLACE document IN collection options RETURN NEW -REPLACE keyExpression WITH document IN collection options RETURN OLD -REPLACE keyExpression WITH document IN collection options RETURN NEW -``` - -Following is an example using a variable named `previous` to return the original -documents before modification. 
For each replaced document, the document key will be -returned: - -``` -FOR u IN users - REPLACE u WITH { value: "test" } - IN users - LET previous = OLD - RETURN previous._key -``` - -The following query uses the `NEW` pseudo-value to return the replaced -documents (without some of their system attributes): - -``` -FOR u IN users - REPLACE u WITH { value: "test" } IN users - LET replaced = NEW - RETURN UNSET(replaced, '_key', '_id', '_rev') -``` diff --git a/Documentation/Books/AQL/Operations/Return.md b/Documentation/Books/AQL/Operations/Return.md deleted file mode 100644 index 8b2dd311ff3a..000000000000 --- a/Documentation/Books/AQL/Operations/Return.md +++ /dev/null @@ -1,202 +0,0 @@ -RETURN -====== - -The *RETURN* statement can be used to produce the result of a query. -It is mandatory to specify a *RETURN* statement at the end of each block in a -data-selection query, otherwise the query result would be undefined. Using -*RETURN* on the main level in data-modification queries is optional. - -The general syntax for *RETURN* is: - -``` -RETURN expression -``` - -The *expression* returned by *RETURN* is produced for each iteration in the block the -*RETURN* statement is placed in. That means the result of a *RETURN* statement -is **always an array**. This includes an empty array if no documents matched the -query and a single return value returned as array with one element. - -To return all elements from the currently iterated array without modification, -the following simple form can be used: - -``` -FOR variableName IN expression - RETURN variableName -``` - -As *RETURN* allows specifying an expression, arbitrary computations can be -performed to calculate the result elements. Any of the variables valid in the -scope the *RETURN* is placed in can be used for the computations. - -To iterate over all documents of a collection called *users* and return the -full documents, you can write: - -```js -FOR u IN users - RETURN u -``` - -In each iteration of the for-loop, a document of the *users* collection is -assigned to a variable *u* and returned unmodified in this example. To return -only one attribute of each document, you could use a different return expression: - -```js -FOR u IN users - RETURN u.name -``` - -Or to return multiple attributes, an object can be constructed like this: - -```js -FOR u IN users - RETURN { name: u.name, age: u.age } -``` - -Note: *RETURN* will close the current scope and eliminate all local variables in it. -This is important to remember when working with [subqueries](../Examples/CombiningQueries.md). - -[Dynamic attribute names](../Fundamentals/DataTypes.md#objects--documents) are -supported as well: - -```js -FOR u IN users - RETURN { [ u._id ]: u.age } -``` - -The document *_id* of every user is used as expression to compute the -attribute key in this example: - -```json -[ - { - "users/9883": 32 - }, - { - "users/9915": 27 - }, - { - "users/10074": 69 - } -] -``` - -The result contains one object per user with a single key/value pair each. -This is usually not desired. 
For a single object, that maps user IDs to ages, -the individual results need to be merged and returned with another `RETURN`: - -```js -RETURN MERGE( - FOR u IN users - RETURN { [ u._id ]: u.age } -) -``` - -```json -[ - { - "users/10074": 69, - "users/9883": 32, - "users/9915": 27 - } -] -``` - -Keep in mind that if the key expression evaluates to the same value multiple -times, only one of the key/value pairs with the duplicate name will survive -[MERGE()](../Functions/Document.md#merge). To avoid this, you can go without -dynamic attribute names, use static names instead and return all document -properties as attribute values: - -```js -FOR u IN users - RETURN { name: u.name, age: u.age } -``` - -```json -[ - { - "name": "John Smith", - "age": 32 - }, - { - "name": "James Hendrix", - "age": 69 - }, - { - "name": "Katie Foster", - "age": 27 - } -] -``` - -RETURN DISTINCT ---------------- - -Since ArangoDB 2.7, *RETURN* can optionally be followed by the *DISTINCT* keyword. -The *DISTINCT* keyword will ensure uniqueness of the values returned by the -*RETURN* statement: - -``` -FOR variableName IN expression - RETURN DISTINCT expression -``` - -If the *DISTINCT* is applied on an expression that itself is an array or a subquery, -the *DISTINCT* will not make the values in each array or subquery result unique, but instead -ensure that the result contains only distinct arrays or subquery results. To make -the result of an array or a subquery unique, simply apply the *DISTINCT* for the -array or the subquery. - -For example, the following query will apply *DISTINCT* on its subquery results, -but not inside the subquery: - -``` -FOR what IN 1..2 - RETURN DISTINCT ( - FOR i IN [ 1, 2, 3, 4, 1, 3 ] - RETURN i - ) -``` - -Here we'll have a *FOR* loop with two iterations that each execute a subquery. The -*DISTINCT* here is applied on the two subquery results. Both subqueries return the -same result value (that is [ 1, 2, 3, 4, 1, 3 ]), so after *DISTINCT* there will -only be one occurrence of the value [ 1, 2, 3, 4, 1, 3 ] left: - -``` -[ - [ 1, 2, 3, 4, 1, 3 ] -] -``` - -If the goal is to apply the *DISTINCT* inside the subquery, it needs to be moved -there: - -``` -FOR what IN 1..2 - LET sub = ( - FOR i IN [ 1, 2, 3, 4, 1, 3 ] - RETURN DISTINCT i - ) - RETURN sub -``` - -In the above case, the *DISTINCT* will make the subquery results unique, so that -each subquery will return a unique array of values ([ 1, 2, 3, 4 ]). As the subquery -is executed twice and there is no *DISTINCT* on the top-level, that array will be -returned twice: - -``` -[ - [ 1, 2, 3, 4 ], - [ 1, 2, 3, 4 ] -] -``` - -Note: the order of results was undefined for *RETURN DISTINCT* until before ArangoDB -3.3. Starting with ArangoDB 3.3, *RETURN DISTINCT* will not change the order of the -results it is applied on. - -Note: *RETURN DISTINCT* is not allowed on the top-level of a query if there is no *FOR* -loop preceding it. diff --git a/Documentation/Books/AQL/Operations/Sort.md b/Documentation/Books/AQL/Operations/Sort.md deleted file mode 100644 index 622990c0f8c3..000000000000 --- a/Documentation/Books/AQL/Operations/Sort.md +++ /dev/null @@ -1,62 +0,0 @@ - -SORT -==== - -The *SORT* statement will force a sort of the array of already produced -intermediate results in the current block. *SORT* allows specifying one or -multiple sort criteria and directions. 
The general syntax is:
-
-```
-SORT expression direction
-```
-
-An example query that sorts by lastName (in ascending order), then by firstName
-(in ascending order), then by id (in descending order):
-
-```
-FOR u IN users
-  SORT u.lastName, u.firstName, u.id DESC
-  RETURN u
-```
-
-Specifying the *direction* is optional. The default (implicit) direction for a
-sort expression is the ascending order. To explicitly specify the sort direction,
-the keywords *ASC* (ascending) and *DESC* (descending) can be used. Multiple sort
-criteria can be separated using commas. In this case the direction is specified
-for each expression separately. For example
-
-```
-SORT doc.lastName, doc.firstName
-```
-
-will first sort documents by lastName in ascending order and then by
-firstName in ascending order.
-
-```
-SORT doc.lastName DESC, doc.firstName
-```
-
-will first sort documents by lastName in descending order and then by
-firstName in ascending order.
-
-```
-SORT doc.lastName, doc.firstName DESC
-```
-
-will first sort documents by lastName in ascending order and then by
-firstName in descending order.
-
-
-Note: when iterating over collection-based arrays, the order of documents is
-always undefined unless an explicit sort order is defined using *SORT*.
-
-
-Note that constant *SORT* expressions can be used to indicate that no particular
-sort order is desired. Constant *SORT* expressions will be optimized away by the AQL
-optimizer during optimization, but specifying them explicitly may enable further
-optimizations if the optimizer does not need to take into account any particular
-sort order. This is especially the case after a *COLLECT* statement, which is
-supposed to produce a sorted result. Specifying an extra *SORT null* after the
-*COLLECT* statement allows the AQL optimizer to remove the post-sorting of the
-collect results altogether.
-
diff --git a/Documentation/Books/AQL/Operations/Update.md b/Documentation/Books/AQL/Operations/Update.md
deleted file mode 100644
index 41cd753e33fd..000000000000
--- a/Documentation/Books/AQL/Operations/Update.md
+++ /dev/null
@@ -1,298 +0,0 @@
-UPDATE
-======
-
-The *UPDATE* keyword can be used to partially update documents in a collection. On a
-single server, updates are executed transactionally in an all-or-nothing fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or update operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *UPDATE* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *UPDATE* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-The system attributes *_id*, *_key* and *_rev* cannot be updated, *_from* and *_to* can.
- -The two syntaxes for an update operation are: - -``` -UPDATE document IN collection options -UPDATE keyExpression WITH document IN collection options -``` - -*collection* must contain the name of the collection in which the documents should -be updated. *document* must be a document that contains the attributes and values -to be updated. When using the first syntax, *document* must also contain the *_key* -attribute to identify the document to be updated. - -```js -FOR u IN users - UPDATE { _key: u._key, name: CONCAT(u.firstName, " ", u.lastName) } IN users -``` - -The following query is invalid because it does not contain a *_key* attribute and -thus it is not possible to determine the documents to be updated: - -```js -FOR u IN users - UPDATE { name: CONCAT(u.firstName, " ", u.lastName) } IN users -``` - -When using the second syntax, *keyExpression* provides the document identification. -This can either be a string (which must then contain the document key) or a -document, which must contain a *_key* attribute. - -An object with `_id` attribute but without `_key` attribute as well as a -document ID as string like `"users/john"` do not work. However, you can use -`DOCUMENT(id)` to fetch the document via its ID and `PARSE_IDENTIFIER(id).key` -to get the document key as string. - -The following queries are equivalent: - -```js -FOR u IN users - UPDATE u._key WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users - -FOR u IN users - UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users - -FOR u IN users - UPDATE u WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users -``` - -An update operation may update arbitrary documents which do not need to be identical -to the ones produced by a preceding *FOR* statement: - -```js -FOR i IN 1..1000 - UPDATE CONCAT('test', i) WITH { foobar: true } IN users - -FOR u IN users - FILTER u.active == false - UPDATE u WITH { status: 'inactive' } IN backup -``` - -Using the current value of a document attribute ------------------------------------------------ - -The pseudo-variable `OLD` is not supported inside of `WITH` clauses (it is -available after `UPDATE`). To access the current attribute value, you can -usually refer to a document via the variable of the `FOR` loop, which is used -to iterate over a collection: - -```js -FOR doc IN users - UPDATE doc WITH { - fullName: CONCAT(doc.firstName, " ", doc.lastName) - } IN users -``` - -If there is no loop, because a single document is updated only, then there -might not be a variable like above (`doc`), which would let you refer to the -document which is being updated: - -```js -UPDATE "john" WITH { ... } IN users -``` - -```js -LET key = PARSE_IDENTIFIER("users/john").key -UPDATE key WITH { ... } IN users -``` - -To access the current value in this situation, the document has to be retrieved -and stored in a variable first: - -```js -LET doc = DOCUMENT("users/john") -UPDATE doc WITH { - fullName: CONCAT(doc.firstName, " ", doc.lastName) -} IN users -``` - -An existing attribute can be modified based on its current value this way, -to increment a counter for instance: - -```js -UPDATE doc WITH { - karma: doc.karma + 1 -} IN users -``` - -If the attribute `karma` doesn't exist yet, `doc.karma` is evaluated to *null*. -The expression `null + 1` results in the new attribute `karma` being set to *1*. -If the attribute does exist, then it is increased by *1*. 
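-
-If relying on the *null + 1* coercion feels too implicit, the fallback value can also be
-spelled out. This is merely a sketch using the `NOT_NULL()` function to start counting
-from *0* for documents that do not have the attribute yet:
-
-```js
-UPDATE doc WITH {
-  karma: NOT_NULL(doc.karma, 0) + 1
-} IN users
-```
-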
- -Arrays can be mutated too of course: - -```js -UPDATE doc WITH { - hobbies: PUSH(doc.hobbies, "swimming") -} IN users -``` - -If the attribute `hobbies` doesn't exist yet, it is conveniently initialized -as `[ "swimming" ]` and otherwise extended. - -Setting query options ---------------------- - -*options* can be used to suppress query errors that may occur when trying to -update non-existing documents or violating unique key constraints: - -```js -FOR i IN 1..1000 - UPDATE { - _key: CONCAT('test', i) - } WITH { - foobar: true - } IN users OPTIONS { ignoreErrors: true } -``` - -An update operation will only update the attributes specified in *document* and -leave other attributes untouched. Internal attributes (such as *_id*, *_key*, *_rev*, -*_from* and *_to*) cannot be updated and are ignored when specified in *document*. -Updating a document will modify the document's revision number with a server-generated value. - -When updating an attribute with a null value, ArangoDB will not remove the attribute -from the document but store a null value for it. To get rid of attributes in an update -operation, set them to null and provide the *keepNull* option: - -```js -FOR u IN users - UPDATE u WITH { - foobar: true, - notNeeded: null - } IN users OPTIONS { keepNull: false } -``` - -The above query will remove the *notNeeded* attribute from the documents and update -the *foobar* attribute normally. - -There is also the option *mergeObjects* that controls whether object contents will be -merged if an object attribute is present in both the *UPDATE* query and in the -to-be-updated document. - -The following query will set the updated document's *name* attribute to the exact -same value that is specified in the query. This is due to the *mergeObjects* option -being set to *false*: - -```js -FOR u IN users - UPDATE u WITH { - name: { first: "foo", middle: "b.", last: "baz" } - } IN users OPTIONS { mergeObjects: false } -``` - -Contrary, the following query will merge the contents of the *name* attribute in the -original document with the value specified in the query: - -```js -FOR u IN users - UPDATE u WITH { - name: { first: "foo", middle: "b.", last: "baz" } - } IN users OPTIONS { mergeObjects: true } -``` - -Attributes in *name* that are present in the to-be-updated document but not in the -query will now be preserved. Attributes that are present in both will be overwritten -with the values specified in the query. - -Note: the default value for *mergeObjects* is *true*, so there is no need to specify it -explicitly. - -To make sure data are durable when an update query returns, there is the *waitForSync* -query option: - -```js -FOR u IN users - UPDATE u WITH { - foobar: true - } IN users OPTIONS { waitForSync: true } -``` - -In order to not accidentially overwrite documents that have been updated since you last fetched -them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` value and -only succeed if they still match, or let ArangoDB ignore them (default): - -```js -FOR i IN 1..1000 - UPDATE { _key: CONCAT('test', i), _rev: "1287623" } - WITH { foobar: true } IN users - OPTIONS { ignoreRevs: false } -``` - -In contrast to the MMFiles engine, the RocksDB engine does not require collection-level -locks. Different write operations on the same collection do not block each other, as -long as there are no _write-write conficts_ on the same documents. 
From an application -development perspective it can be desired to have exclusive write access on collections, -to simplify the development. Note that writes do not block reads in RocksDB. -Exclusive access can also speed up modification queries, because we avoid conflict checks. - -Use the *exclusive* option to achieve this effect on a per query basis: - -```js -FOR doc IN collection - UPDATE doc - WITH { updated: true } IN collection - OPTIONS { exclusive: true } -``` - - -Returning the modified documents --------------------------------- - -The modified documents can also be returned by the query. In this case, the `UPDATE` -statement needs to be followed a `RETURN` statement (intermediate `LET` statements -are allowed, too). These statements can refer to the pseudo-values `OLD` and `NEW`. -The `OLD` pseudo-value refers to the document revisions before the update, and `NEW` -refers to document revisions after the update. - -Both `OLD` and `NEW` will contain all document attributes, even those not specified -in the update expression. - -``` -UPDATE document IN collection options RETURN OLD -UPDATE document IN collection options RETURN NEW -UPDATE keyExpression WITH document IN collection options RETURN OLD -UPDATE keyExpression WITH document IN collection options RETURN NEW -``` - -Following is an example using a variable named `previous` to capture the original -documents before modification. For each modified document, the document key is returned. - -```js -FOR u IN users - UPDATE u WITH { value: "test" } - IN users - LET previous = OLD - RETURN previous._key -``` - -The following query uses the `NEW` pseudo-value to return the updated documents, -without some of the system attributes: - -```js -FOR u IN users - UPDATE u WITH { value: "test" } - IN users - LET updated = NEW - RETURN UNSET(updated, "_key", "_id", "_rev") -``` - -It is also possible to return both `OLD` and `NEW`: - -```js -FOR u IN users - UPDATE u WITH { value: "test" } - IN users - RETURN { before: OLD, after: NEW } -``` diff --git a/Documentation/Books/AQL/Operations/Upsert.md b/Documentation/Books/AQL/Operations/Upsert.md deleted file mode 100644 index b64262eace80..000000000000 --- a/Documentation/Books/AQL/Operations/Upsert.md +++ /dev/null @@ -1,141 +0,0 @@ -UPSERT -====== - -The *UPSERT* keyword can be used for checking whether certain documents exist, -and to update/replace them in case they exist, or create them in case they do not exist. -On a single server, upserts are executed transactionally in an all-or-nothing fashion. - -If the RocksDB engine is used and intermediate commits are enabled, a query may -execute intermediate transaction commits in case the running transaction (AQL -query) hits the specified size thresholds. In this case, the query's operations -carried out so far will be committed and not rolled back in case of a later abort/rollback. -That behavior can be controlled by adjusting the intermediate commit settings for -the RocksDB engine. - -For sharded collections, the entire query and/or upsert operation may not be transactional, -especially if it involves different shards and/or database servers. - -Each *UPSERT* operation is restricted to a single collection, and the -[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic. -Only a single *UPSERT* statement per collection is allowed per AQL query, and -it cannot be followed by read or write operations that access the same collection, by -traversal operations, or AQL functions that can read documents. 
- -The syntax for an upsert operation is: - -``` -UPSERT searchExpression INSERT insertExpression UPDATE updateExpression IN collection options -UPSERT searchExpression INSERT insertExpression REPLACE updateExpression IN collection options -``` - -When using the *UPDATE* variant of the upsert operation, the found document will be -partially updated, meaning only the attributes specified in *updateExpression* will be -updated or added. When using the *REPLACE* variant of upsert, existing documents will -be replaced with the contexts of *updateExpression*. - -Updating a document will modify the document's revision number with a server-generated value. -The system attributes *_id*, *_key* and *_rev* cannot be updated, *_from* and *_to* can. - -The *searchExpression* contains the document to be looked for. It must be an object -literal without dynamic attribute names. In case no such document can be found in -*collection*, a new document will be inserted into the collection as specified in the -*insertExpression*. - -In case at least one document in *collection* matches the *searchExpression*, it will -be updated using the *updateExpression*. When more than one document in the collection -matches the *searchExpression*, it is undefined which of the matching documents will -be updated. It is therefore often sensible to make sure by other means (such as unique -indexes, application logic etc.) that at most one document matches *searchExpression*. - -The following query will look in the *users* collection for a document with a specific -*name* attribute value. If the document exists, its *logins* attribute will be increased -by one. If it does not exist, a new document will be inserted, consisting of the -attributes *name*, *logins*, and *dateCreated*: - -``` -UPSERT { name: 'superuser' } -INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() } -UPDATE { logins: OLD.logins + 1 } IN users -``` - -Note that in the *UPDATE* case it is possible to refer to the previous version of the -document using the *OLD* pseudo-value. - - -Setting query options ---------------------- - -As in several above examples, the *ignoreErrors* option can be used to suppress query -errors that may occur when trying to violate unique key constraints. - -When updating or replacing an attribute with a null value, ArangoDB will not remove the -attribute from the document but store a null value for it. To get rid of attributes in -an upsert operation, set them to null and provide the *keepNull* option. - -There is also the option *mergeObjects* that controls whether object contents will be -merged if an object attribute is present in both the *UPDATE* query and in the -to-be-updated document. - -Note: the default value for *mergeObjects* is *true*, so there is no need to specify it -explicitly. - -To make sure data are durable when an update query returns, there is the *waitForSync* -query option. - -In order to not accidentially update documents that have been written and updated since -you last fetched them you can use the option *ignoreRevs* to either let ArangoDB compare -the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default): - -``` -FOR i IN 1..1000 - UPSERT { _key: CONCAT('test', i)} - INSERT {foobar: false} - UPDATE {_rev: "1287623", foobar: true } - IN users OPTIONS { ignoreRevs: false } -``` - -*NOTE*: You need to add the `_rev` value in the updateExpression, it will not be used within -the searchExpression. 
Even worse, if you use an outdated `_rev` in the searchExpression -UPSERT will trigger the INSERT path instead of the UPDATE path, because it has not found a document -exactly matching the searchExpression. - -In contrast to the MMFiles engine, the RocksDB engine does not require collection-level -locks. Different write operations on the same collection do not block each other, as -long as there are no _write-write conficts_ on the same documents. From an application -development perspective it can be desired to have exclusive write access on collections, -to simplify the development. Note that writes do not block reads in RocksDB. -Exclusive access can also speed up modification queries, because we avoid conflict checks. - -Use the *exclusive* option to achieve this effect on a per query basis: - -```js -FOR i IN 1..1000 - UPSERT { _key: CONCAT('test', i) } - INSERT { foobar: false } - UPDATE { foobar: true } - IN users OPTIONS { exclusive: true } -``` - -Returning documents -------------------- - -`UPSERT` statements can optionally return data. To do so, they need to be followed -by a `RETURN` statement (intermediate `LET` statements are allowed, too). These statements -can optionally perform calculations and refer to the pseudo-values `OLD` and `NEW`. -In case the upsert performed an insert operation, `OLD` will have a value of *null*. -In case the upsert performed an update or replace operation, `OLD` will contain the -previous version of the document, before update/replace. - -`NEW` will always be populated. It will contain the inserted document in case the -upsert performed an insert, or the updated/replaced document in case it performed an -update/replace. - -This can also be used to check whether the upsert has performed an insert or an update -internally: - -``` -UPSERT { name: 'superuser' } -INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() } -UPDATE { logins: OLD.logins + 1 } IN users -RETURN { doc: NEW, type: OLD ? 'update' : 'insert' } -``` diff --git a/Documentation/Books/AQL/Operations/With.md b/Documentation/Books/AQL/Operations/With.md deleted file mode 100644 index 82d96dc54560..000000000000 --- a/Documentation/Books/AQL/Operations/With.md +++ /dev/null @@ -1,42 +0,0 @@ - -WITH -==== - -An AQL query can optionally start with a *WITH* statement and the list of -collections used by the query. All collections specified in *WITH* will be -read-locked at query start, in addition to the other collections the query -uses and that are detected by the AQL query parser. - -Specifying further collections in *WITH* can be useful for queries that -dynamically access collections (e.g. via traversals or via dynamic -document access functions such as `DOCUMENT()`). Such collections may be -invisible to the AQL query parser at query compile time, and thus will not -be read-locked automatically at query start. In this case, the AQL execution -engine will lazily lock these collections whenever they are used, which can -lead to deadlock with other queries. In case such deadlock is detected, the -query will automatically be aborted and changes will be rolled back. In this -case the client application can try sending the query again. -However, if client applications specify the list of used collections for all -their queries using *WITH*, then no deadlocks will happen and no queries will -be aborted due to deadlock situations. - -From ArangoDB 3.1 onwards `WITH` is required for traversals in a -clustered environment in order to avoid deadlocks. 
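Apart from traversals, a typical case for an explicit *WITH* declaration is a query that resolves references with `DOCUMENT()`, because the referenced collection never appears literally in the query text. A minimal sketch, assuming a *users* collection whose documents store the document ID of a profile in a *profile* attribute, plus a separate *profiles* collection:

```js
WITH profiles
FOR u IN users
  RETURN DOCUMENT(u.profile)
```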
- -Note that for queries that access only a single collection or that have all -collection names specified somewhere else in the query string, there is no -need to use *WITH*. *WITH* is only useful when the AQL query parser cannot -automatically figure out which collections are going to be used by the query. -*WITH* is only useful for queries that dynamically access collections, e.g. -via traversals, shortest path operations or the *DOCUMENT()* function. - -``` -WITH managers, usersHaveManagers -FOR v, e, p IN OUTBOUND 'users/1' GRAPH 'userGraph' - RETURN { v, e, p } -``` - -Note that constant *WITH* is also a keyword that is used in other contexts, -for example in *UPDATE* statements. If *WITH* is used to specify the extra -list of collections, then it must be placed at the very start of the query -string. diff --git a/Documentation/Books/AQL/Operators.md b/Documentation/Books/AQL/Operators.md deleted file mode 100644 index 045e8c7dfbd0..000000000000 --- a/Documentation/Books/AQL/Operators.md +++ /dev/null @@ -1,346 +0,0 @@ -Operators -========= - -AQL supports a number of operators that can be used in expressions. There are -comparison, logical, arithmetic, and the ternary operator. - -Comparison operators --------------------- - -Comparison (or relational) operators compare two operands. They can be used with -any input data types, and will return a boolean result value. - -The following comparison operators are supported: - -- *==* equality -- *!=* inequality -- *<* less than -- *<=* less or equal -- *>* greater than -- *>=* greater or equal -- *IN* test if a value is contained in an array -- *NOT IN* test if a value is not contained in an array -- *LIKE* tests if a string value matches a pattern -- *=~* tests if a string value matches a regular expression -- *!~* tests if a string value does not match a regular expression - -Each of the comparison operators returns a boolean value if the comparison can -be evaluated and returns *true* if the comparison evaluates to true, and *false* -otherwise. - -The comparison operators accept any data types for the first and second operands. -However, *IN* and *NOT IN* will only return a meaningful result if their right-hand -operand is an array, and *LIKE* will only execute if both operands are string values. -The comparison operators will not perform any implicit type casts if the compared -operands have different or non-sensible types. - -Some examples for comparison operations in AQL: - -``` -0 == null // false -1 > 0 // true -true != null // true -45 <= "yikes!" // true -65 != "65" // true -65 == 65 // true -1.23 > 1.32 // false -1.5 IN [ 2, 3, 1.5 ] // true -"foo" IN null // false -42 NOT IN [ 17, 40, 50 ] // true -"abc" == "abc" // true -"abc" == "ABC" // false -"foo" LIKE "f%" // true -"foo" =~ "^f[o].$" // true -"foo" !~ "[a-z]+bar$" // true -``` - -The *LIKE* operator checks whether its left operand matches the pattern specified -in its right operand. The pattern can consist of regular characters and wildcards. -The supported wildcards are *_* to match a single arbitrary character, and *%* to -match any number of arbitrary characters. Literal *%* and *_* need to be escaped -with a backslash. Backslashes need to be escaped themselves, which effectively -means that two reverse solidus characters need to preceed a literal percent sign -or underscore. In arangosh, additional escaping is required, making it four -backslashes in total preceeding the to-be-escaped character. 
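Issued from arangosh, where the query itself is wrapped in a JavaScript string, the doubled escaping could look like the following sketch (the collection-less `RETURN` is only used as an example):

```js
db._query('RETURN "a_b_foo" LIKE "a\\\\_b\\\\_foo"').toArray();
// [ true ]
```

At the AQL level itself, two backslashes remain sufficient, as the examples below show: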
- -``` -"abc" LIKE "a%" // true -"abc" LIKE "_bc" // true -"a_b_foo" LIKE "a\\_b\\_foo" // true -``` - -The pattern matching performed by the *LIKE* operator is case-sensitive. - -The regular expression operators *=~* and *!~* expect their left-hand operands to -be strings, and their right-hand operands to be strings containing valid regular -expressions as specified in the documentation for the AQL function -[REGEX_TEST()](Functions/String.md#regextest). - -Array comparison operators --------------------------- - -The comparison operators also exist as *array variant*. In the array -variant, the operator is prefixed with one of the keywords *ALL*, *ANY* -or *NONE*. Using one of these keywords changes the operator behavior to -execute the comparison operation for all, any, or none of its left hand -argument values. It is therefore expected that the left hand argument -of an array operator is an array. - -Examples: - -``` -[ 1, 2, 3 ] ALL IN [ 2, 3, 4 ] // false -[ 1, 2, 3 ] ALL IN [ 1, 2, 3 ] // true -[ 1, 2, 3 ] NONE IN [ 3 ] // false -[ 1, 2, 3 ] NONE IN [ 23, 42 ] // true -[ 1, 2, 3 ] ANY IN [ 4, 5, 6 ] // false -[ 1, 2, 3 ] ANY IN [ 1, 42 ] // true -[ 1, 2, 3 ] ANY == 2 // true -[ 1, 2, 3 ] ANY == 4 // false -[ 1, 2, 3 ] ANY > 0 // true -[ 1, 2, 3 ] ANY <= 1 // true -[ 1, 2, 3 ] NONE < 99 // false -[ 1, 2, 3 ] NONE > 10 // true -[ 1, 2, 3 ] ALL > 2 // false -[ 1, 2, 3 ] ALL > 0 // true -[ 1, 2, 3 ] ALL >= 3 // false -["foo", "bar"] ALL != "moo" // true -["foo", "bar"] NONE == "bar" // false -["foo", "bar"] ANY == "foo" // true -``` - -Note that these operators are not optimized yet. Indexes will not be utilized. - -Logical operators ------------------ - -The following logical operators are supported in AQL: - -- *&&* logical and operator -- *||* logical or operator -- *!* logical not/negation operator - -AQL also supports the following alternative forms for the logical operators: - -- *AND* logical and operator -- *OR* logical or operator -- *NOT* logical not/negation operator - -The alternative forms are aliases and functionally equivalent to the regular -operators. - -The two-operand logical operators in AQL will be executed with short-circuit -evaluation (except if one of the operands is or includes a subquery. In this -case the subquery will be pulled out an evaluated before the logical operator). - -The result of the logical operators in AQL is defined as follows: - -- `lhs && rhs` will return `lhs` if it is `false` or would be `false` when converted - into a boolean. If `lhs` is `true` or would be `true` when converted to a boolean, - `rhs` will be returned. -- `lhs || rhs` will return `lhs` if it is `true` or would be `true` when converted - into a boolean. If `lhs` is `false` or would be `false` when converted to a boolean, - `rhs` will be returned. -- `! value` will return the negated value of `value` converted into a boolean - -Some examples for logical operations in AQL: - -```js -u.age > 15 && u.address.city != "" -true || false -NOT u.isInvalid -1 || ! 0 -``` - -Passing non-boolean values to a logical operator is allowed. Any non-boolean operands -will be casted to boolean implicitly by the operator, without making the query abort. 
- -The *conversion to a boolean value* works as follows: -- `null` will be converted to `false` -- boolean values remain unchanged -- all numbers unequal to zero are `true`, zero is `false` -- an empty string is `false`, all other strings are `true` -- arrays (`[ ]`) and objects / documents (`{ }`) are `true`, regardless of their contents - -The result of *logical and* and *logical or* operations can now have any data -type and is not necessarily a boolean value. - -For example, the following logical operations will return boolean values: - -```js -25 > 1 && 42 != 7 // true -22 IN [ 23, 42 ] || 23 NOT IN [ 22, 7 ] // true -25 != 25 // false -``` - -whereas the following logical operations will not return boolean values: - -```js -1 || 7 // 1 -null || "foo" // "foo" -null && true // null -true && 23 // 23 -``` - -Arithmetic operators --------------------- - -Arithmetic operators perform an arithmetic operation on two numeric -operands. The result of an arithmetic operation is again a numeric value. - -AQL supports the following arithmetic operators: - -- *+* addition -- *-* subtraction -- \* multiplication -- */* division -- *%* modulus - -Unary plus and unary minus are supported as well: - -```js -LET x = -5 -LET y = 1 -RETURN [-x, +y] -// [5, 1] -``` - -For exponentiation, there is a [numeric function](Functions/Numeric.md#pow) *POW()*. -The syntax `base ** exp` is not supported. - -For string concatenation, you must use the [string function](Functions/String.md#concat) -*CONCAT()*. Combining two strings with a plus operator (`"foo" + "bar"`) will not work! -Also see [Common Errors](CommonErrors.md). - -Some example arithmetic operations: - -``` -1 + 1 -33 - 99 -12.4 * 4.5 -13.0 / 0.1 -23 % 7 --15 -+9.99 -``` - -The arithmetic operators accept operands of any type. Passing non-numeric values to an -arithmetic operator will cast the operands to numbers using the type casting rules -applied by the [TO_NUMBER()](Functions/TypeCast.md#tonumber) function: - -- `null` will be converted to `0` -- `false` will be converted to `0`, true will be converted to `1` -- a valid numeric value remains unchanged, but NaN and Infinity will be converted to `0` -- string values are converted to a number if they contain a valid string representation - of a number. Any whitespace at the start or the end of the string is ignored. Strings - with any other contents are converted to the number `0` -- an empty array is converted to `0`, an array with one member is converted to the numeric - representation of its sole member. Arrays with more members are converted to the number - `0`. -- objects / documents are converted to the number `0`. - -An arithmetic operation that produces an invalid value, such as `1 / 0` (division by zero) -will also produce a result value of `null`. The query is not aborted, but you may see a -warning. - -Here are a few examples: - -``` -1 + "a" // 1 -1 + "99" // 100 -1 + null // 1 -null + 1 // 1 -3 + [ ] // 3 -24 + [ 2 ] // 26 -24 + [ 2, 4 ] // 0 -25 - null // 25 -17 - true // 16 -23 * { } // 0 -5 * [ 7 ] // 35 -24 / "12" // 2 -1 / 0 // 0 -``` - -Ternary operator ----------------- - -AQL also supports a ternary operator that can be used for conditional -evaluation. The ternary operator expects a boolean condition as its first -operand, and it returns the result of the second operand if the condition -evaluates to true, and the third operand otherwise. - -*Examples* - -```js -u.age > 15 || u.active == true ? 
u.userId : null -``` - -There is also a shortcut variant of the ternary operator with just two -operands. This variant can be used when the expression for the boolean -condition and the return value should be the same: - -*Examples* - -```js -u.value ? : 'value is null, 0 or not present' -``` - - -Range operator --------------- - -AQL supports expressing simple numeric ranges with the *..* operator. -This operator can be used to easily iterate over a sequence of numeric -values. - -The *..* operator will produce an array of the integer values in the -defined range, with both bounding values included. - -*Examples* - -``` -2010..2013 -``` - -will produce the following result: - -```json -[ 2010, 2011, 2012, 2013 ] -``` - -Using the range operator is equivalent to writing an array with the integer -values in the range specified by the bounds of the range. If the bounds of -the range operator are non-integers, they will be converted to integer -values first. - -There is also a [RANGE() function](Functions/Numeric.md#range). - -Array operators ---------------- - -AQL provides array operators [\*] for -[array variable expansion](Advanced/ArrayOperators.md#array-expansion) and -[\*\*] for [array contraction](Advanced/ArrayOperators.md#array-contraction). - -Operator precedence -------------------- - -The operator precedence in AQL is similar as in other familiar languages (lowest precedence first): - -- *? :* ternary operator -- *||* logical or -- *&&* logical and -- *==*, *!=* equality and inequality -- *IN* in operator -- *<*, *<=*, *>=*, *>* less than, less equal, - greater equal, greater than -- *+*, *-* addition, subtraction -- \*, */*, *%* multiplication, division, modulus -- *!*, *+*, *-* logical negation, unary plus, unary minus -- [\*] expansion -- *()* function call -- *.* member access -- *[]* indexed value access - -The parentheses *(* and *)* can be used to enforce a different operator -evaluation order. diff --git a/Documentation/Books/AQL/README.md b/Documentation/Books/AQL/README.md deleted file mode 100644 index 61886a0af3b8..000000000000 --- a/Documentation/Books/AQL/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Introduction -============ - -The ArangoDB query language (AQL) can be used to retrieve and modify data that -are stored in ArangoDB. - -{% hint 'info' %} -Want to learn AQL for the first time? Be sure to check out the -[**Tutorial**](Tutorial/README.md) before you head off to the -in-depth documentation! -{% endhint %} - -The general workflow when executing a query is as follows: - -- A client application ships an AQL query to the ArangoDB server. The query text - contains everything ArangoDB needs to compile the result set -- ArangoDB will parse the query, execute it and compile the results. If the - query is invalid or cannot be executed, the server will return an error that - the client can process and react to. If the query can be executed - successfully, the server will return the query results (if any) to the client - -AQL is mainly a declarative language, meaning that a query expresses what result -should be achieved but not how it should be achieved. AQL aims to be -human-readable and therefore uses keywords from the English language. Another -design goal of AQL was client independency, meaning that the language and syntax -are the same for all clients, no matter what programming language the clients -may use. Further design goals of AQL were the support of complex query patterns -and the different data models ArangoDB offers. 
- -In its purpose, AQL is similar to the Structured Query Language (SQL). AQL supports -reading and modifying collection data, but it doesn't support data-definition -operations such as creating and dropping databases, collections and indexes. -It is a pure data manipulation language (DML), not a data definition language -(DDL) or a data control language (DCL). - -The syntax of AQL queries is different to SQL, even if some keywords overlap. -Nevertheless, AQL should be easy to understand for anyone with an SQL background. - -For some example queries, please refer to the chapters -[Data Queries](DataQueries.md), -[Usual query patterns](Examples/README.md) -and [Tutorial](Tutorial/README.md). diff --git a/Documentation/Books/AQL/SUMMARY.md b/Documentation/Books/AQL/SUMMARY.md deleted file mode 100644 index 1ecdf5593e70..000000000000 --- a/Documentation/Books/AQL/SUMMARY.md +++ /dev/null @@ -1,79 +0,0 @@ - - -# Summary -* [Introduction](README.md) -* [Tutorial](Tutorial/README.md) - * [Basic CRUD](Tutorial/CRUD.md) - * [Matching documents](Tutorial/Filter.md) - * [Sorting and limiting](Tutorial/SortLimit.md) - * [Joining together](Tutorial/Join.md) - * [Graph traversal](Tutorial/Traversal.md) - * [Geospatial queries](Tutorial/Geospatial.md) -* [How to invoke AQL](Invocation/README.md) - * [with Arangosh](Invocation/WithArangosh.md) - * [with the Web Interface](Invocation/WithWebInterface.md) -* [AQL Fundamentals](Fundamentals/README.md) - * [AQL Syntax](Fundamentals/Syntax.md) - * [Data types](Fundamentals/DataTypes.md) - * [Bind Parameters](Fundamentals/BindParameters.md) - * [Type and value order](Fundamentals/TypeValueOrder.md) - * [Accessing data from collections](Fundamentals/DocumentData.md) - * [Query Results](Fundamentals/QueryResults.md) - * [Query Errors](Fundamentals/QueryErrors.md) -* [Operators](Operators.md) -* [Data Queries](DataQueries.md) -* [High level Operations](Operations/README.md) - * [FOR](Operations/For.md) - * [RETURN](Operations/Return.md) - * [FILTER](Operations/Filter.md) - * [SORT](Operations/Sort.md) - * [LIMIT](Operations/Limit.md) - * [LET](Operations/Let.md) - * [COLLECT](Operations/Collect.md) - * [REMOVE](Operations/Remove.md) - * [UPDATE](Operations/Update.md) - * [REPLACE](Operations/Replace.md) - * [INSERT](Operations/Insert.md) - * [UPSERT](Operations/Upsert.md) - * [WITH](Operations/With.md) -* [Functions](Functions/README.md) - * [Array](Functions/Array.md) - * [Date](Functions/Date.md) - * [Document / Object](Functions/Document.md) - * [Fulltext](Functions/Fulltext.md) - * [Geo](Functions/Geo.md) - * [Miscellaneous](Functions/Miscellaneous.md) - * [Numeric](Functions/Numeric.md) - * [String](Functions/String.md) - * [Type check & cast](Functions/TypeCast.md) -* [Graphs](Graphs/README.md) - * [Traversals explained](Graphs/TraversalsExplained.md) - * [Traversals](Graphs/Traversals.md) - * [Shortest Path](Graphs/ShortestPath.md) - * [k Shortest Paths](Graphs/KShortestPaths.md) -* [ArangoSearch Views](Views/README.md) - * [Usage](Views/ArangoSearch/README.md) -* [Advanced Features](Advanced/README.md) - * [Array Operators](Advanced/ArrayOperators.md) -* [Usual Query Patterns](Examples/README.md) - * [Counting](Examples/Counting.md) - * [Data-modification queries](Examples/DataModificationQueries.md) - * [Subqueries](Examples/CombiningQueries.md) - * [Projections and filters](Examples/ProjectionsAndFilters.md) - * [Joins](Examples/Join.md) - * [Grouping](Examples/Grouping.md) - * [Traversals](Examples/CombiningGraphTraversals.md) - * [Remove 
vertex](Examples/RemoveVertex.md) - * [Multiple path search](Examples/MultiplePaths.md) - * [Queries without collections](Examples/QueriesNoCollections.md) -* [User Functions](Extending/README.md) - * [Conventions](Extending/Conventions.md) - * [Registering Functions](Extending/Functions.md) -* [Execution and Performance](ExecutionAndPerformance/README.md) - * [Query statistics](ExecutionAndPerformance/QueryStatistics.md) - * [Parsing queries](ExecutionAndPerformance/ParsingQueries.md) - * [Explaining queries](ExecutionAndPerformance/ExplainingQueries.md) - * [Query Profiling](ExecutionAndPerformance/QueryProfiler.md) - * [Query Optimization](ExecutionAndPerformance/Optimizer.md) - * [Caching query results](ExecutionAndPerformance/QueryCache.md) -* [Common Errors](CommonErrors.md) diff --git a/Documentation/Books/AQL/Tutorial/CRUD.md b/Documentation/Books/AQL/Tutorial/CRUD.md deleted file mode 100644 index 6cbea324b2d9..000000000000 --- a/Documentation/Books/AQL/Tutorial/CRUD.md +++ /dev/null @@ -1,343 +0,0 @@ -CRUD -==== - -- [**C**reate documents](#create-documents) -- [**R**ead documents](#read-documents) -- [**U**pdate documents](#update-documents) -- [**D**elete documents](#delete-documents) - -Create documents ----------------- - -Before we can insert documents with AQL, we need a place to put them in - a -collection. Collections can be managed via the web interface, arangosh or a -driver. It is not possible to do so with AQL however. - - -![Add Collection](Collection_Add.png) - -![Create Characters collection](Characters_Collection_Creation.png) - -Click on *COLLECTIONS* in the web interface, then *Add Collection* and type -`Characters` as name. Confirm with *Save*. The new collection should appear -in the list. - -Next, click on *QUERIES*. To create the first document for collection with AQL, -use the following AQL query, which you can paste into the query textbox and -run by clicking *Execute*: - -![Insert query in query editor](Query_Insert.png) - -```js -INSERT { - "name": "Ned", - "surname": "Stark", - "alive": true, - "age": 41, - "traits": ["A","H","C","N","P"] -} INTO Characters -``` - -The syntax is `INSERT document INTO collectionName`. The document is an object -like you may know it from JavaScript or JSON, which is comprised of attribute -key and value pairs. The quotes around the attribute keys are optional in AQL. -Keys are always character sequences (strings), whereas attribute values can -have [different types](../Fundamentals/DataTypes.md): - -- null -- boolean (true, false) -- number (integer and floating point) -- string -- array -- object - -Name and surname of the character document we inserted are both string values. -The alive state uses a boolean. Age is a numeric value. The traits are an array -of strings. The entire document is an object. 
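To get a feeling for these data types without writing to a collection, you can also just `RETURN` a literal object; a standalone example (the attribute names are arbitrary):

```js
RETURN {
  "missing": null,
  "alive": true,
  "age": 41,
  "name": "Ned",
  "traits": ["A", "H"],
  "nested": { "object": true }
}
```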
- -Let's add a bunch of other characters in a single query: - -```js -LET data = [ - { "name": "Robert", "surname": "Baratheon", "alive": false, "traits": ["A","H","C"] }, - { "name": "Jaime", "surname": "Lannister", "alive": true, "age": 36, "traits": ["A","F","B"] }, - { "name": "Catelyn", "surname": "Stark", "alive": false, "age": 40, "traits": ["D","H","C"] }, - { "name": "Cersei", "surname": "Lannister", "alive": true, "age": 36, "traits": ["H","E","F"] }, - { "name": "Daenerys", "surname": "Targaryen", "alive": true, "age": 16, "traits": ["D","H","C"] }, - { "name": "Jorah", "surname": "Mormont", "alive": false, "traits": ["A","B","C","F"] }, - { "name": "Petyr", "surname": "Baelish", "alive": false, "traits": ["E","G","F"] }, - { "name": "Viserys", "surname": "Targaryen", "alive": false, "traits": ["O","L","N"] }, - { "name": "Jon", "surname": "Snow", "alive": true, "age": 16, "traits": ["A","B","C","F"] }, - { "name": "Sansa", "surname": "Stark", "alive": true, "age": 13, "traits": ["D","I","J"] }, - { "name": "Arya", "surname": "Stark", "alive": true, "age": 11, "traits": ["C","K","L"] }, - { "name": "Robb", "surname": "Stark", "alive": false, "traits": ["A","B","C","K"] }, - { "name": "Theon", "surname": "Greyjoy", "alive": true, "age": 16, "traits": ["E","R","K"] }, - { "name": "Bran", "surname": "Stark", "alive": true, "age": 10, "traits": ["L","J"] }, - { "name": "Joffrey", "surname": "Baratheon", "alive": false, "age": 19, "traits": ["I","L","O"] }, - { "name": "Sandor", "surname": "Clegane", "alive": true, "traits": ["A","P","K","F"] }, - { "name": "Tyrion", "surname": "Lannister", "alive": true, "age": 32, "traits": ["F","K","M","N"] }, - { "name": "Khal", "surname": "Drogo", "alive": false, "traits": ["A","C","O","P"] }, - { "name": "Tywin", "surname": "Lannister", "alive": false, "traits": ["O","M","H","F"] }, - { "name": "Davos", "surname": "Seaworth", "alive": true, "age": 49, "traits": ["C","K","P","F"] }, - { "name": "Samwell", "surname": "Tarly", "alive": true, "age": 17, "traits": ["C","L","I"] }, - { "name": "Stannis", "surname": "Baratheon", "alive": false, "traits": ["H","O","P","M"] }, - { "name": "Melisandre", "alive": true, "traits": ["G","E","H"] }, - { "name": "Margaery", "surname": "Tyrell", "alive": false, "traits": ["M","D","B"] }, - { "name": "Jeor", "surname": "Mormont", "alive": false, "traits": ["C","H","M","P"] }, - { "name": "Bronn", "alive": true, "traits": ["K","E","C"] }, - { "name": "Varys", "alive": true, "traits": ["M","F","N","E"] }, - { "name": "Shae", "alive": false, "traits": ["M","D","G"] }, - { "name": "Talisa", "surname": "Maegyr", "alive": false, "traits": ["D","C","B"] }, - { "name": "Gendry", "alive": false, "traits": ["K","C","A"] }, - { "name": "Ygritte", "alive": false, "traits": ["A","P","K"] }, - { "name": "Tormund", "surname": "Giantsbane", "alive": true, "traits": ["C","P","A","I"] }, - { "name": "Gilly", "alive": true, "traits": ["L","J"] }, - { "name": "Brienne", "surname": "Tarth", "alive": true, "age": 32, "traits": ["P","C","A","K"] }, - { "name": "Ramsay", "surname": "Bolton", "alive": true, "traits": ["E","O","G","A"] }, - { "name": "Ellaria", "surname": "Sand", "alive": true, "traits": ["P","O","A","E"] }, - { "name": "Daario", "surname": "Naharis", "alive": true, "traits": ["K","P","A"] }, - { "name": "Missandei", "alive": true, "traits": ["D","L","C","M"] }, - { "name": "Tommen", "surname": "Baratheon", "alive": true, "traits": ["I","L","B"] }, - { "name": "Jaqen", "surname": "H'ghar", "alive": true, "traits": 
["H","F","K"] }, - { "name": "Roose", "surname": "Bolton", "alive": true, "traits": ["H","E","F","A"] }, - { "name": "The High Sparrow", "alive": true, "traits": ["H","M","F","O"] } -] - -FOR d IN data - INSERT d INTO Characters -``` - -The `LET` keyword defines a variable with name *data* and an array of objects -as value, so `LET variableName = valueExpression` and the expression being a -literal array definition like `[ {...}, {...}, ... ]`. - -`FOR variableName IN expression` is used to iterate over each element of the -*data* array. In each loop, one element is assigned to the variable *d*. -This variable is then used in the `INSERT` statement instead of a literal -object definition. What is does is basically: - -```js -INSERT { - "name": "Robert", - "surname": "Baratheon", - "alive": false, - "traits": ["A","H","C"] -} INTO Characters - -INSERT { - "name": "Jaime", - "surname": "Lannister", - "alive": true, - "age": 36, - "traits": ["A","F","B"] -} INTO Characters - -... -``` - -Note: AQL does not permit multiple `INSERT` operations that target the same -collection in in a single query. -It is allowed as body of a `FOR` loop however, inserting multiple documents -like we did with above query. - -Read documents --------------- - -There are a couple of documents in the *Characters* collection by now. We can -retrieve them all using a `FOR` loop again. This time however, we use it to -go through all documents in the collection instead of an array: - -```js -FOR c IN Characters - RETURN c -``` - -The syntax of the loop is `FOR variableName IN collectionName`. For each -document in the collection, *c* is assigned a document, which is then returned -as per the loop body. The query returns all characters we previously stored. - -Among them should be *Ned Stark*, similar to this example: - -```json - { - "_key": "2861650", - "_id": "Characters/2861650", - "_rev": "_V1bzsXa---", - "name": "Ned", - "surname": "Stark", - "alive": true, - "age": 41, - "traits": ["A","H","C","N","P"] - }, -``` - -The document features the four attributes we stored, plus three more added by -the database system. Each document needs a unique `_key`, which identifies it -within a collection. The `_id` is a computed property, a concatenation of the -collection name, a forward slash `/` and the document key. It uniquely identies -a document within a database. `_rev` is a revision ID managed by the system. - -Document keys can be provided by the user upon document creation, or a unique -value is assigned automatically. It can not be changed later. All three system -attributes starting with an underscore `_` are read-only. - -We can use either the document key or the document ID to retrieve a specific -document with the help of an AQL function `DOCUMENT()`: - -```js -RETURN DOCUMENT("Characters", "2861650") -// --- or --- -RETURN DOCUMENT("Characters/2861650") -``` - -```json -[ - { - "_key": "2861650", - "_id": "Characters/2861650", - "_rev": "_V1bzsXa---", - "name": "Ned", - "surname": "Stark", - "alive": true, - "age": 41, - "traits": ["A","H","C","N","P"] - } -] -``` - -Note: Document keys will be different for you. Change the queries accordingly. -Here, `"2861650"` is the key for the *Ned Stark* document, and `"2861653"` for -*Catelyn Stark*. 
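If you do not want to copy keys by hand, you can also let a query look them up. A small sketch that fetches the key of the *Ned* document using the `FILTER` operation covered in the next chapter:

```js
FOR c IN Characters
  FILTER c.name == "Ned"
  RETURN c._key
```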
- -The `DOCUMENT()` function also allows to fetch multiple documents at once: - -```js -RETURN DOCUMENT("Characters", ["2861650", "2861653"]) -// --- or --- -RETURN DOCUMENT(["Characters/2861650", "Characters/2861653"]) -``` - -```json -[ - [ - { - "_key": "2861650", - "_id": "Characters/2861650", - "_rev": "_V1bzsXa---", - "name": "Ned", - "surname": "Stark", - "alive": true, - "age": 41, - "traits": ["A","H","C","N","P"] - }, - { - "_key": "2861653", - "_id": "Characters/2861653", - "_rev": "_V1bzsXa--B", - "name": "Catelyn", - "surname": "Stark", - "alive": false, - "age": 40, - "traits": ["D","H","C"] - } - ] -] -``` - -See the [`DOCUMENT()` function](../Functions/Miscellaneous.md#document) -documentation for more details. - -Update documents ----------------- - -According to our *Ned Stark* document, he is alive. When we get to know that he -died, we need to change the `alive` attribute. Let us modify the existing document: - -```js -UPDATE "2861650" WITH { alive: false } IN Characters -``` - -The syntax is `UPDATE documentKey WITH object IN collectionName`. It updates the -specified document with the attributes listed (or adds them if they don't exist), -but leaves the rest untouched. To replace the entire document content, you may -use `REPLACE` instead of `UPDATE`: - -```js -REPLACE "2861650" WITH { - name: "Ned", - surname: "Stark", - alive: false, - age: 41, - traits: ["A","H","C","N","P"] -} IN Characters -``` - -This also works in a loop, to add a new attribute to all documents for instance: - -```js -FOR c IN Characters - UPDATE c WITH { season: 1 } IN Characters -``` - -A variable is used instead of a literal document key, to update each document. -The query adds an attribute `season` to the documents' top-level. You can -inspect the result by re-running the query that returns all documents in -collection: - -```js -FOR c IN Characters - RETURN c -``` - -```json -[ - [ - { - "_key": "2861650", - "_id": "Characters/2861650", - "_rev": "_V1bzsXa---", - "name": "Ned", - "surname": "Stark", - "alive": false, - "age": 41, - "traits": ["A","H","C","N","P"], - "season": 1 - }, - { - "_key": "2861653", - "_id": "Characters/2861653", - "_rev": "_V1bzsXa--B", - "name": "Catelyn", - "surname": "Stark", - "alive": false, - "age": 40, - "traits": ["D","H","C"], - "season": 1 - }, - { - ... - } - ] -] -``` - -Delete documents ----------------- - -To fully remove documents from a collection, there is the `REMOVE` operation. -It works similar to the other modification operations, yet without a `WITH` clause: - -```js -REMOVE "2861650" IN Characters -``` - -It can also be used in a loop body to effectively truncate a collection: - -```js -FOR c IN Characters - REMOVE c IN Characters -``` - -Note: re-run the [insert queries](#create-documents) at the top with all -character documents before you continue with the next chapter, to have data -to work with again. 
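For completeness, `REMOVE` can also be combined with a `FILTER` to delete only matching documents instead of truncating the whole collection, for example every character that is not alive (a sketch; like the truncation above, it permanently deletes data):

```js
FOR c IN Characters
  FILTER c.alive == false
  REMOVE c IN Characters
```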
diff --git a/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png deleted file mode 100644 index 9bf38ff69dbc..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Characters_Table.png b/Documentation/Books/AQL/Tutorial/Characters_Table.png deleted file mode 100644 index d5f9ddf29124..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Characters_Table.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png deleted file mode 100644 index c1c240a5d038..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png b/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png deleted file mode 100644 index b1efb45f317b..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Collection_Add.png b/Documentation/Books/AQL/Tutorial/Collection_Add.png deleted file mode 100644 index db818c803cb9..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Collection_Add.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png b/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png deleted file mode 100644 index bb0d4cec89d8..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Filter.md b/Documentation/Books/AQL/Tutorial/Filter.md deleted file mode 100644 index b16472d5f069..000000000000 --- a/Documentation/Books/AQL/Tutorial/Filter.md +++ /dev/null @@ -1,139 +0,0 @@ -Matching documents -================== - -So far, we either looked up a single document, or returned the entire character -collection. For the lookup, we used the `DOCUMENT()` function, which means we -can only find documents by their key or ID. - -To find documents that fulfill certain criteria more complex than key equality, -there is the `FILTER` operation in AQL, which enables us to formulate arbitrary -conditions for documents to match. - -Equality condition ------------------- - -```js -FOR c IN Characters - FILTER c.name == "Ned" - RETURN c -``` - -The filter condition reads like: "the attribute *name* of a character document -must be equal to the string *Ned*". If the condition applies, character -document gets returned. This works with any attribute likewise: - -```js -FOR c IN Characters - FILTER c.surname == "Stark" - RETURN c -``` - -Range conditions ----------------- - -Strict equality is one possible condition we can state. There are plenty of -other conditions we can formulate however. For example, we could ask for all -young characters: - -```js -FOR c IN Characters - FILTER c.age >= 13 - RETURN c.name -``` - -```json -[ - "Joffrey", - "Tyrion", - "Samwell", - "Ned", - "Catelyn", - "Cersei", - "Jon", - "Sansa", - "Brienne", - "Theon", - "Davos", - "Jaime", - "Daenerys" -] -``` - -The operator `>=` stands for *greater-or-equal*, so every character of age 13 -or older is returned (only their name in the example). 
We can return names -and age of all characters younger than 13 by changing the operator to -*less-than* and using the object syntax to define a subset of attributes to -return: - -```js -FOR c IN Characters - FILTER c.age < 13 - RETURN { name: c.name, age: c.age } -``` - -```json -[ - { "name": "Tommen", "age": null }, - { "name": "Arya", "age": 11 }, - { "name": "Roose", "age": null }, - ... -] -``` - -You may notice that it returns name and age of 30 characters, most with an -age of `null`. The reason for this is, that `null` is the fallback value if -an attribute is requested by the query, but no such attribute exists in the -document, and the `null` is compares to numbers as lower (see -[Type and value order](../Fundamentals/TypeValueOrder.md)). Hence, it -accidentally fulfills the age criterion `c.age < 13` (`null < 13`). - -Multiple conditions -------------------- - -To not let documents pass the filter without an age attribute, we can add a -second criterion: - -```js -FOR c IN Characters - FILTER c.age < 13 - FILTER c.age != null - RETURN { name: c.name, age: c.age } -``` - -```json -[ - { "name": "Arya", "age": 11 }, - { "name": "Bran", "age": 10 } -] -``` - -This could equally be written with a boolean `AND` operator as: - -```js -FOR c IN Characters - FILTER c.age < 13 AND c.age != null - RETURN { name: c.name, age: c.age } -``` - -And the second condition could as well be `c.age > null`. - -Alternative conditions ----------------------- - -If you want documents to fulfill one or another condition, possibly for -different attributes as well, use `OR`: - -```js -FOR c IN Characters - FILTER c.name == "Jon" OR c.name == "Joffrey" - RETURN { name: c.name, surname: c.surname } -``` - -```json -[ - { "name": "Joffrey", "surname": "Baratheon" }, - { "name": "Jon", "surname": "Snow" } -] -``` - -See more details about [Filter operations](../Operations/Filter.md). diff --git a/Documentation/Books/AQL/Tutorial/Geospatial.md b/Documentation/Books/AQL/Tutorial/Geospatial.md deleted file mode 100644 index 421c73f2adcb..000000000000 --- a/Documentation/Books/AQL/Tutorial/Geospatial.md +++ /dev/null @@ -1,182 +0,0 @@ -Geospatial queries -================== - -Geospatial coordinates consisting of a latitude and longitude value -can be stored either as two separate attributes, or as a single -attribute in the form of an array with both numeric values. -ArangoDB can [index such coordinates](../../Manual/Indexing/Geo.html) -for fast geospatial queries. 
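Both storage layouts look as follows; the place name *Dublin* and the attribute names *latitude*, *longitude* and *coordinate* are merely examples, and the tutorial below uses the array form:

```js
RETURN [
  { "name": "Dublin", "latitude": 53.35, "longitude": -6.26 },  // two separate attributes
  { "name": "Dublin", "coordinate": [ 53.35, -6.26 ] }          // single array attribute [lat, lon]
]
```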
- -Locations data --------------- - -Let us insert some filming locations into a new collection *Locations*, -which you need to create first, then run below AQL query: - -![Create Locations collection](Locations_Collection_Creation.png) - -```js -LET places = [ - { "name": "Dragonstone", "coordinate": [ 55.167801, -6.815096 ] }, - { "name": "King's Landing", "coordinate": [ 42.639752, 18.110189 ] }, - { "name": "The Red Keep", "coordinate": [ 35.896447, 14.446442 ] }, - { "name": "Yunkai", "coordinate": [ 31.046642, -7.129532 ] }, - { "name": "Astapor", "coordinate": [ 31.50974, -9.774249 ] }, - { "name": "Winterfell", "coordinate": [ 54.368321, -5.581312 ] }, - { "name": "Vaes Dothrak", "coordinate": [ 54.16776, -6.096125 ] }, - { "name": "Beyond the wall", "coordinate": [ 64.265473, -21.094093 ] } -] - -FOR place IN places - INSERT place INTO Locations -``` - -Visualization of the coordinates on a map with their labels: - -![Locations on map](Locations_Map.png) - -Geospatial index ----------------- - -To query based on coordinates, a [geo index](../../Manual/Indexing/Geo.html) -is required. It determines which fields contain the latitude and longitude -values. - -- Go to *COLLECTIONS* -- Click on the *Locations* collection -- Switch to the *Indexes* tab at top -- Click the green button with a plus on the right-hand side -- Change the type to *Geo Index* -- Enter `coordinate` into the *Fields* field -- Click *Create* to confirm - -![Create geospatial index on coordinate attribute](Locations_GeoIndex_Creation.png) - -![Indexes of Locations collection](Locations_Indexes.png) - -Find nearby locations ---------------------- - -A `FOR` loop is used again, but this time to iterate over the results of a -function call to `NEAR()` to find the *n* closest coordinates to a reference -point, and return the documents with the nearby locations. The default for -*n* is 100, which means 100 documents are returned at most, the closest -matches first. - -In below example, the limit is set to 3. The origin (the reference point) is -a coordinate somewhere downtown in Dublin, Ireland: - -```js -FOR loc IN NEAR(Locations, 53.35, -6.26, 3) - RETURN { - name: loc.name, - latitude: loc.coordinate[0], - longitude: loc.coordinate[1] - } -``` - -```json -[ - { - "name": "Vaes Dothrak", - "latitude": 54.16776, - "longitude": -6.096125 - }, - { - "name": "Winterfell", - "latitude": 54.368321, - "longitude": -5.581312 - }, - { - "name": "Dragonstone", - "latitude": 55.167801, - "longitude": -6.815096 - } -] -``` - -The query returns the location name, as well as the coordinate. The coordinate -is returned as two separate attributes. You may use a simpler `RETURN loc` -instead if you want. - -Find locations within radius ----------------------------- - -`NEAR()` can be swapped out with `WITHIN()`, to search for locations within a -given radius from a reference point. The syntax is the same as for `NEAR()`, -except for the fourth parameter, which specifies the radius instead of a limit. -The unit for the radius is meters. 
The example uses a radius of 200,000 -meters (200 kilometers): - -```js -FOR loc IN WITHIN(Locations, 53.35, -6.26, 200 * 1000) - RETURN { - name: loc.name, - latitude: loc.coordinate[0], - longitude: loc.coordinate[1] - } -``` - -```json -[ - { - "name": "Vaes Dothrak", - "latitude": 54.16776, - "longitude": -6.096125 - }, - { - "name": "Winterfell", - "latitude": 54.368321, - "longitude": -5.581312 - } -] -``` - -Return the distance -------------------- - -Both `NEAR()` and `WITHIN()` can return the distance to the reference point -by adding an optional fifth parameter. It has to be a string, which will be -used as attribute name for an additional attribute with the distance in meters: - -```js -FOR loc IN NEAR(Locations, 53.35, -6.26, 3, "distance") - RETURN { - name: loc.name, - latitude: loc.coordinate[0], - longitude: loc.coordinate[1], - distance: loc.distance / 1000 - } -``` - -```json -[ - { - "name": "Vaes Dothrak", - "latitude": 54.16776, - "longitude": -6.096125, - "distance": 91.56658640314431 - }, - { - "name": "Winterfell", - "latitude": 54.368321, - "longitude": -5.581312, - "distance": 121.66399816395028 - }, - { - "name": "Dragonstone", - "latitude": 55.167801, - "longitude": -6.815096, - "distance": 205.31879386198324 - } -] -``` - -The extra attribute, here called *distance*, is returned as part of the *loc* -variable, as if it was part of the location document. The value is divided -by 1000 in the example query, to convert the unit to kilometers, simply to -make it better readable. - - diff --git a/Documentation/Books/AQL/Tutorial/Join.md b/Documentation/Books/AQL/Tutorial/Join.md deleted file mode 100644 index 7bc71f39cfa1..000000000000 --- a/Documentation/Books/AQL/Tutorial/Join.md +++ /dev/null @@ -1,322 +0,0 @@ -Joining together -================ - -References to other documents ------------------------------ - -The character data we imported has an attribute *traits* for each character, -which is an array of strings. It does not store character features directly -however: - -```json -{ - "name": "Ned", - "surname": "Stark", - "alive": false, - "age": 41, - "traits": ["A","H","C","N","P"] -} -``` - -It is rather a list of letters without an apparent meaning. The idea here is -that *traits* is supposed to store documents keys of another collection, which -we can use to resolve the letters to labels such as "strong". The benefit of -using another collection for the actual traits is, that we can easily query -for all existing traits later on and store labels in multiple languages for -instance in a central place. If we would embed traits directly... - -```json -{ - "name": "Ned", - "surname": "Stark", - "alive": false, - "age": 41, - "traits": [ - { - "de": "stark", - "en": "strong" - }, - { - "de": "einflussreich", - "en": "powerful" - }, - { - "de": "loyal", - "en": "loyal" - }, - { - "de": "rational", - "en": "rational" - }, - { - "de": "mutig", - "en": "brave" - } - ] -} -``` - -... it becomes really hard to maintain traits. If you were to rename or -translate one of them, you would need to find all other character documents -with the same trait and perform the changes there too. If we only refer to a -trait in another collection, it is as easy as updating a single document. - - - -![Data model comparison](Comparison_DataModels.png) - -Importing traits ----------------- - -Below you find the traits data. 
Follow the pattern shown in -[Create documents](CRUD.md#create-documents) to import it: - -- Create a document collection *Traits* -- Assign the data to a variable in AQL, `LET data = [ ... ]` -- Use a `FOR` loop to iterate over each array element of the data -- `INSERT` the element `INTO Traits` - -![Create Traits collection](Traits_Collection_Creation.png) - -```json -[ - { "_key": "A", "en": "strong", "de": "stark" }, - { "_key": "B", "en": "polite", "de": "freundlich" }, - { "_key": "C", "en": "loyal", "de": "loyal" }, - { "_key": "D", "en": "beautiful", "de": "schön" }, - { "_key": "E", "en": "sneaky", "de": "hinterlistig" }, - { "_key": "F", "en": "experienced", "de": "erfahren" }, - { "_key": "G", "en": "corrupt", "de": "korrupt" }, - { "_key": "H", "en": "powerful", "de": "einflussreich" }, - { "_key": "I", "en": "naive", "de": "naiv" }, - { "_key": "J", "en": "unmarried", "de": "unverheiratet" }, - { "_key": "K", "en": "skillful", "de": "geschickt" }, - { "_key": "L", "en": "young", "de": "jung" }, - { "_key": "M", "en": "smart", "de": "klug" }, - { "_key": "N", "en": "rational", "de": "rational" }, - { "_key": "O", "en": "ruthless", "de": "skrupellos" }, - { "_key": "P", "en": "brave", "de": "mutig" }, - { "_key": "Q", "en": "mighty", "de": "mächtig" }, - { "_key": "R", "en": "weak", "de": "schwach" } -] -``` - -Resolving traits ----------------- - -Let's start simple by returning only the traits attribute of each character: - -```js -FOR c IN Characters - RETURN c.traits -``` - -```json -[ - { "traits": ["A","H","C","N","P"] }, - { "traits": ["D","H","C"] }, - ... -] -``` - - -Also see the [Fundamentals of Objects / Documents](../Fundamentals/DataTypes.md#objects--documents) -about attribute access. - -We can use the *traits* array together with the `DOCUMENT()` function to use -the elements as document keys and look them up in the *Traits* collection: - -```js -FOR c IN Characters - RETURN DOCUMENT("Traits", c.traits) -``` - -```json -[ - [ - { - "_key": "A", - "_id": "Traits/A", - "_rev": "_V5oRUS2---", - "en": "strong", - "de": "stark" - }, - { - "_key": "H", - "_id": "Traits/H", - "_rev": "_V5oRUS6--E", - "en": "powerful", - "de": "einflussreich" - }, - { - "_key": "C", - "_id": "Traits/C", - "_rev": "_V5oRUS6--_", - "en": "loyal", - "de": "loyal" - }, - { - "_key": "N", - "_id": "Traits/N", - "_rev": "_V5oRUT---D", - "en": "rational", - "de": "rational" - }, - { - "_key": "P", - "_id": "Traits/P", - "_rev": "_V5oRUTC---", - "en": "brave", - "de": "mutig" - } - ], - [ - { - "_key": "D", - "_id": "Traits/D", - "_rev": "_V5oRUS6--A", - "en": "beautiful", - "de": "schön" - }, - { - "_key": "H", - "_id": "Traits/H", - "_rev": "_V5oRUS6--E", - "en": "powerful", - "de": "einflussreich" - }, - { - "_key": "C", - "_id": "Traits/C", - "_rev": "_V5oRUS6--_", - "en": "loyal", - "de": "loyal" - } - ], - ... -] -``` - -The [DOCUMENT() function](../Functions/Miscellaneous.md#document) can be used -to look up a single or multiple documents via document identifiers. In our -example, we pass the collection name from which we want to fetch documents -as first argument (`"Traits"`) and an array of document keys (`_key` attribute) -as second argument. In return we get an array of the full trait documents -for each character. 
- -This is a bit too much information, so let's only return English labels using -the [array expansion](../Advanced/ArrayOperators.md#array-expansion) notation: - -```js -FOR c IN Characters - RETURN DOCUMENT("Traits", c.traits)[*].en -``` - -```json -[ - [ - "strong", - "powerful", - "loyal", - "rational", - "brave" - ], - [ - "beautiful", - "powerful", - "loyal" - ], - ... -] -``` - -Merging characters and traits ------------------------------ - -Great, we resolved the letters to meaningful traits! But we also need to know -to which character they belong. Thus, we need to merge both the character -document and the data from the trait documents: - -```js -FOR c IN Characters - RETURN MERGE(c, { traits: DOCUMENT("Traits", c.traits)[*].en } ) -``` - -```json -[ - { - "_id": "Characters/2861650", - "_key": "2861650", - "_rev": "_V1bzsXa---", - "age": 41, - "alive": false, - "name": "Ned", - "surname": "Stark", - "traits": [ - "strong", - "powerful", - "loyal", - "rational", - "brave" - ] - }, - { - "_id": "Characters/2861653", - "_key": "2861653", - "_rev": "_V1bzsXa--B", - "age": 40, - "alive": false, - "name": "Catelyn", - "surname": "Stark", - "traits": [ - "beautiful", - "powerful", - "loyal" - ] - }, - ... -] -``` - -The `MERGE()` functions merges objects together. Because we used an object -`{ traits: ... }` which has the same attribute name *traits* as the original -character attribute, the latter got overwritten by the merge operation. - -Join another way ----------------- - -The `DOCUMENT()` function utilizes primary indices to look up documents quickly. -It is limited to find documents via their identifiers however. For a use case -like in our example it is sufficient to accomplish a simple join. - -There is another, more flexible syntax for joins: nested `FOR` loops over -multiple collections, with a `FILTER` condition to match up attributes. -In case of the traits key array, there needs to be a third loop to iterate -over the keys: - -```js -FOR c IN Characters - RETURN MERGE(c, { - traits: ( - FOR key IN c.traits - FOR t IN Traits - FILTER t._key == key - RETURN t.en - ) - }) -``` - -For each character, it loops over its *traits* attribute (e.g. `["D","H","C"]`) -and for each document reference in this array, it loops over the *Traits* -collections. There is a condition to match the document key with the key -reference. The inner `FOR` loop and the `FILTER` get transformed to a primary -index lookup in this case instead of building up a Cartesian product only to -filter away everything but a single match: Document keys within a collection -are unique, thus there can only be one match. - -Each written-out, English trait is returned and all the traits are then merged -with the character document. The result is identical to the query using -`DOCUMENT()`. However, this approach with a nested `FOR` loop and a `FILTER` -is not limited to primary keys. You can do this with any other attribute as well. -For an efficient lookup, make sure you add a hash index for this attribute. -If its values are unique, then also set the index option to unique. 
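From arangosh, such an index could be created as in the following sketch, here for the English label attribute used in the join above (set *unique* to *true* only if the attribute values really are unique):

```js
db.Traits.ensureIndex({ type: "hash", fields: ["en"], unique: false });
```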
diff --git a/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png deleted file mode 100644 index a85082d26c8f..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png b/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png deleted file mode 100644 index ec886021763f..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Locations_Indexes.png b/Documentation/Books/AQL/Tutorial/Locations_Indexes.png deleted file mode 100644 index 1b15fdc177b7..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Locations_Indexes.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Locations_Map.png b/Documentation/Books/AQL/Tutorial/Locations_Map.png deleted file mode 100644 index f292bd6ab162..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Locations_Map.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Locations_Table.png b/Documentation/Books/AQL/Tutorial/Locations_Table.png deleted file mode 100644 index bfa70d2c447b..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Locations_Table.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Query_Insert.png b/Documentation/Books/AQL/Tutorial/Query_Insert.png deleted file mode 100644 index 9966e0989f8e..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Query_Insert.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/README.md b/Documentation/Books/AQL/Tutorial/README.md deleted file mode 100644 index 25745e92f055..000000000000 --- a/Documentation/Books/AQL/Tutorial/README.md +++ /dev/null @@ -1,55 +0,0 @@ -AQL tutorial -============ - -This is an introduction to ArangoDB's query language AQL, built around a small -dataset of characters from the novel and fantasy drama television series -Game of Thrones (as of season 1). It includes character traits in two languages, -some family relations, and last but not least a small set of filming locations, -which makes for an interesting mix of data to work with. - -There is no need to import the data before you start. It is provided as part -of the AQL queries in this tutorial. You can interact with ArangoDB using its -[web interface](../../Manual/GettingStarted/WebInterface.html) to manage -collections and execute the queries. - -Chapters --------- - -- [Basic CRUD](CRUD.md) -- [Matching documents](Filter.md) -- [Sorting and limiting](SortLimit.md) -- [Joining together](Join.md) -- [Graph traversal](Traversal.md) -- [Geospatial queries](Geospatial.md) - - - -Dataset -------- - -### Characters - -The dataset features 43 characters with their name, surname, age, alive status -and trait references. The surname and age properties are not always present. -The column *traits (resolved)* is not part of the actual data used in this -tutorial, but included for your convenience. - -![Characters table](Characters_Table.png) - -### Traits - -There are 18 unique traits. Each trait has a random letter as document key. -The trait labels come in English and German. - -![Traits table](Traits_Table.png) - -### Locations - -This small collection of 8 filming locations comes with two attributes, a -*name* and a *coordinate*. 
The coordinates are modeled as number arrays, -comprised of a latitude and a longitude value each. - -![Locations table](Locations_Table.png) diff --git a/Documentation/Books/AQL/Tutorial/SortLimit.md b/Documentation/Books/AQL/Tutorial/SortLimit.md deleted file mode 100644 index d9b28f172372..000000000000 --- a/Documentation/Books/AQL/Tutorial/SortLimit.md +++ /dev/null @@ -1,184 +0,0 @@ -Sorting and limiting -==================== - -Cap the result count --------------------- - -It may not always be necessary to return all documents, that a `FOR` loop -would normally return. In those cases, we can limit the amount of documents -with a `LIMIT()` operation: - -```js -FOR c IN Characters - LIMIT 5 - RETURN c.name -``` - -```json -[ - "Joffrey", - "Tommen", - "Tyrion", - "Roose", - "Tywin" -] -``` - -`LIMIT` is followed by a number for the maximum document count. There is a -second syntax however, which allows you to skip a certain amount of record -and return the next *n* documents: - -```js -FOR c IN Characters - LIMIT 2, 5 - RETURN c.name -``` - -```json -[ - "Tyrion", - "Roose", - "Tywin", - "Samwell", - "Melisandre" -] -``` - -See how the second query skipped the first two names and returned the next -five (both results feature Tyrion, Roose and Tywin). - -Sort by name ------------- - -The order in which matching records were returned by the queries shown until -here was basically random. To return them in a defined order, we can add a -`SORT()` operation. It can have a big impact on the result if combined with -a `LIMIT()`, because the result becomes predictable if you sort first. - -```js -FOR c IN Characters - SORT c.name - LIMIT 10 - RETURN c.name -``` - -```json -[ - "Arya", - "Bran", - "Brienne", - "Bronn", - "Catelyn", - "Cersei", - "Daario", - "Daenerys", - "Davos", - "Ellaria" -] -``` - -See how it sorted by name, then returned the ten alphabetically first coming -names. We can reverse the sort order with `DESC` like descending: - -```js -FOR c IN Characters - SORT c.name DESC - LIMIT 10 - RETURN c.name -``` - -```json -[ - "Ygritte", - "Viserys", - "Varys", - "Tywin", - "Tyrion", - "Tormund", - "Tommen", - "Theon", - "The High Sparrow", - "Talisa" -] -``` - -The first sort was ascending, which is the default order. Because it is the -default, it is not required to explicitly ask for `ASC` order. - -Sort by multiple attributes ---------------------------- - -Assume we want to sort by surname. Many of the characters share a surname. -The result order among characters with the same surname is undefined. We can -first sort by surname, then name to determine the order: - -```js -FOR c IN Characters - FILTER c.surname - SORT c.surname, c.name - LIMIT 10 - RETURN { - surname: c.surname, - name: c.name - } -``` - -```json -[ - { "surname": "Baelish", "name": "Petyr" }, - { "surname": "Baratheon", "name": "Joffrey" }, - { "surname": "Baratheon", "name": "Robert" }, - { "surname": "Baratheon", "name": "Stannis" }, - { "surname": "Baratheon", "name": "Tommen" }, - { "surname": "Bolton", "name": "Ramsay" }, - { "surname": "Bolton", "name": "Roose" }, - { "surname": "Clegane", "name": "Sandor" }, - { "surname": "Drogo", "name": "Khal" }, - { "surname": "Giantsbane", "name": "Tormund" } -] -``` - -Overall, the documents are sorted by last name. If the *surname* is the same -for two characters, the *name* values are compared and the result sorted. 
- -Note that a filter is applied before sorting, to only let documents through, -that actually feature a surname value (many don't have it and would cause -`null` values in the result). - -Sort by age ------------ - -The order can also be determined by a numeric value, such as the age: - -```js -FOR c IN Characters - FILTER c.age - SORT c.age - LIMIT 10 - RETURN { - name: c.name, - age: c.age - } -``` - -```json -[ - { "name": "Bran", "age": 10 }, - { "name": "Arya", "age": 11 }, - { "name": "Sansa", "age": 13 }, - { "name": "Jon", "age": 16 }, - { "name": "Theon", "age": 16 }, - { "name": "Daenerys", "age": 16 }, - { "name": "Samwell", "age": 17 }, - { "name": "Joffrey", "age": 19 }, - { "name": "Tyrion", "age": 32 }, - { "name": "Brienne", "age": 32 } -] -``` - -A filter is applied to avoid documents without age attribute. The remaining -documents are sorted by age in ascending order, and the name and age of the -ten youngest characters are returned. - -See the [SORT operation](../Operations/Sort.md) and -[LIMIT operation](../Operations/Limit.md) documentation for more details. diff --git a/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png deleted file mode 100644 index 899ce51ccfd0..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Traits_Table.png b/Documentation/Books/AQL/Tutorial/Traits_Table.png deleted file mode 100644 index d3869349edbc..000000000000 Binary files a/Documentation/Books/AQL/Tutorial/Traits_Table.png and /dev/null differ diff --git a/Documentation/Books/AQL/Tutorial/Traversal.md b/Documentation/Books/AQL/Tutorial/Traversal.md deleted file mode 100644 index ce54a920f5dd..000000000000 --- a/Documentation/Books/AQL/Tutorial/Traversal.md +++ /dev/null @@ -1,307 +0,0 @@ -Traversal -========= - -Relations such as between parents and children can be modeled as graph. -In ArangoDB, two documents (a parent and a child character document) can be -linked by an edge document. Edge documents are stored in edge collections and -have two additional attributes: `_from` and `_to`. They reference any two -documents by their document IDs (`_id`). - -ChildOf relations ------------------ - -Our characters have the following relations between parents and children -(first names only for a better overview): - -``` - Robb -> Ned - Sansa -> Ned - Arya -> Ned - Bran -> Ned - Jon -> Ned - Robb -> Catelyn - Sansa -> Catelyn - Arya -> Catelyn - Bran -> Catelyn - Jaime -> Tywin - Cersei -> Tywin - Tyrion -> Tywin - Joffrey -> Jaime - Joffrey -> Cersei -``` - -Visualized as graph: - -![ChildOf graph visualization](ChildOf_Graph.png) - -Creating the edges ------------------- - -To create the required edge documents to store these relations in the database, -we can run a query that combines joining and filtering to match up the right -character documents, then use their `_id` attribute to insert an edge into an -edge collection *ChildOf*. - -First off, create a new collection with the name *ChildOf* and make sure you -change the collection type to **Edge**. 
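If you prefer arangosh over the web interface, the same collection can presumably also be created programmatically, for example:

```js
// creates a collection of type "edge" named ChildOf
db._createEdgeCollection("ChildOf");
```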
- -![Create ChildOf edge collection](ChildOf_Collection_Creation.png) - -Then run the following query: - -```js -LET data = [ - { - "parent": { "name": "Ned", "surname": "Stark" }, - "child": { "name": "Robb", "surname": "Stark" } - }, { - "parent": { "name": "Ned", "surname": "Stark" }, - "child": { "name": "Sansa", "surname": "Stark" } - }, { - "parent": { "name": "Ned", "surname": "Stark" }, - "child": { "name": "Arya", "surname": "Stark" } - }, { - "parent": { "name": "Ned", "surname": "Stark" }, - "child": { "name": "Bran", "surname": "Stark" } - }, { - "parent": { "name": "Catelyn", "surname": "Stark" }, - "child": { "name": "Robb", "surname": "Stark" } - }, { - "parent": { "name": "Catelyn", "surname": "Stark" }, - "child": { "name": "Sansa", "surname": "Stark" } - }, { - "parent": { "name": "Catelyn", "surname": "Stark" }, - "child": { "name": "Arya", "surname": "Stark" } - }, { - "parent": { "name": "Catelyn", "surname": "Stark" }, - "child": { "name": "Bran", "surname": "Stark" } - }, { - "parent": { "name": "Ned", "surname": "Stark" }, - "child": { "name": "Jon", "surname": "Snow" } - }, { - "parent": { "name": "Tywin", "surname": "Lannister" }, - "child": { "name": "Jaime", "surname": "Lannister" } - }, { - "parent": { "name": "Tywin", "surname": "Lannister" }, - "child": { "name": "Cersei", "surname": "Lannister" } - }, { - "parent": { "name": "Tywin", "surname": "Lannister" }, - "child": { "name": "Tyrion", "surname": "Lannister" } - }, { - "parent": { "name": "Cersei", "surname": "Lannister" }, - "child": { "name": "Joffrey", "surname": "Baratheon" } - }, { - "parent": { "name": "Jaime", "surname": "Lannister" }, - "child": { "name": "Joffrey", "surname": "Baratheon" } - } -] - -FOR rel in data - LET parentId = FIRST( - FOR c IN Characters - FILTER c.name == rel.parent.name - FILTER c.surname == rel.parent.surname - LIMIT 1 - RETURN c._id - ) - LET childId = FIRST( - FOR c IN Characters - FILTER c.name == rel.child.name - FILTER c.surname == rel.child.surname - LIMIT 1 - RETURN c._id - ) - FILTER parentId != null AND childId != null - INSERT { _from: childId, _to: parentId } INTO ChildOf - RETURN NEW -``` - -The character documents don't have user-defined keys. If they had, it would -allow us to create the edges more easily like: - -```js -INSERT { _from: "Characters/robb", _to: "Characters/ned" } INTO ChildOf -``` - -However, creating the edges programmatically based on character names is a -good exercise. 
Breakdown of the query: - -- Assign the relations in form of an array of objects with a *parent* and - a *child* attribute each, both with sub-attributes *name* and *surname*, - to a variable `data` -- For each element in this array, assign a relation to a variable `rel` and - execute the subsequent instructions -- Assign the result of an expression to a variable `parentId` - - Take the first element of a sub-query result (sub-queries are enclosed - by parentheses, but here they are also a function call) - - For each document in the Characters collection, assign the document - to a variable `c` - - Apply two filter conditions: the name in the character document must - equal the parent name in `rel`, and the surname must also equal the - surname give in the relations data - - Stop after the first match for efficiency - - Return the ID of the character document (the result of the sub-query - is an array with one element, `FIRST()` takes this element and assigns - it to the `parentId` variable) -- Assign the result of an expression to a variable `childId` - - A sub-query is used to find the child character document and the ID is - returned, in the same way as the parent document ID (see above) -- If either or both of the sub-queries were unable to find a match, skip the - current relation, because two IDs for both ends of an edge are required to - create one (this is only a precaution) -- Insert a new edge document into the ChildOf collection, with the edge going - from `childId` to `parentId` and no other attributes -- Return the new edge document (optional) - -Traverse to the parents ------------------------ - -Now that edges link character documents (vertices), we have a graph we can -query to find out who the parents are of another character – or in -graph terms, we want to start at a vertex and follow the edges to other -vertices in an [AQL graph traversal](../Graphs/Traversals.md): - -```js -FOR v IN 1..1 OUTBOUND "Characters/2901776" ChildOf - RETURN v.name -``` - -This `FOR` loop doesn't iterate over a collection or an array, it walks the -graph and iterates over the connected vertices it finds, with the vertex -document assigned to a variable (here: `v`). It can also emit the edges it -walked as well as the full path from start to end to -[another two variables](../Graphs/Traversals.md#syntax). - -In above query, the traversal is restricted to a minimum and maximum traversal -depth of 1 (how many steps to take from the start vertex), and to only follow -edges in `OUTBOUND` direction. Our edges point from child to parent, and the -parent is one step away from the child, thus it gives us the parents of the -child we start at. `"Characters/2901776"` is that start vertex. Note that the -document ID will be different for you, so please adjust it to your document ID -of e.g. the Bran Stark document: - -```js -FOR c IN Characters - FILTER c.name == "Bran" - RETURN c._id -``` - -```json -[ "Characters/" ] -``` - -You may also combine this query with the traversal directly, to easily change -the start vertex by adjusting the filter condition(s): - -```js -FOR c IN Characters - FILTER c.name == "Bran" - FOR v IN 1..1 OUTBOUND c ChildOf - RETURN v.name -``` - -The start vertex is followed by `ChildOf`, which is our edge collection. The -example query returns only the name of each parent to keep the result short: - -```json -[ - "Ned", - "Catelyn" -] -``` - -The same result will be returned for Robb, Arya and Sansa as starting point. -For Jon Snow, it will only be Ned. 
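As mentioned above, the traversal can also expose the traversed edge and the full path via two additional loop variables. A small sketch of that extended syntax (the returned attributes are chosen for illustration only):

```js
FOR c IN Characters
  FILTER c.name == "Bran"
  FOR v, e, p IN 1..1 OUTBOUND c ChildOf
    RETURN { parent: v.name, edge: e._id, pathLength: LENGTH(p.edges) }
```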
- -Traverse to the children ------------------------- - -We can also walk from a parent in reverse edge direction (`INBOUND` that is) -to the children: - -```js -FOR c IN Characters - FILTER c.name == "Ned" - FOR v IN 1..1 INBOUND c ChildOf - RETURN v.name -``` - -```json -[ - "Robb", - "Sansa", - "Jon", - "Arya", - "Bran" -] -``` - -Traverse to the grandchildren ------------------------------ - -For the Lannister family, we have relations that span from parent to -grandchild. Let's change the traversal depth to return grandchildren, -which means to go exactly two steps: - -```js -FOR c IN Characters - FILTER c.name == "Tywin" - FOR v IN 2..2 INBOUND c ChildOf - RETURN v.name -``` - -```json -[ - "Joffrey", - "Joffrey" -] -``` - -It might be a bit unexpected, that Joffrey is returned twice. However, if you -look at the graph visualization, you can see that multiple paths lead from -Joffrey (bottom right) to Tywin: - -![ChildOf graph visualization](ChildOf_Graph.png) - -``` -Tywin <- Jaime <- Joffrey -Tywin <- Cersei <- Joffrey -``` - -As a quick fix, change the last line of the query to `RETURN DISTINCT v.name` -to return each value only once. Keep in mind though, that there are -[traversal options](../Graphs/Traversals.md#syntax) to suppress duplicate -vertices early on. - -Also check out the -[ArangoDB Graph Course](https://www.arangodb.com/arangodb-graph-course) -which covers the basics, but also explains different traversal options -and advanced graph queries. - -Traverse with variable depth ----------------------------- - -To return the parents and grandparents of Joffrey, we can walk edges in -`OUTBOUND` direction and adjust the traversal depth to go at least 1 step, -and 2 at most: - -```js -FOR c IN Characters - FILTER c.name == "Joffrey" - FOR v IN 1..2 OUTBOUND c ChildOf - RETURN DISTINCT v.name -``` - -```json -[ - "Cersei", - "Tywin", - "Jaime" -] -``` - -If we had deeper family trees, it would only be a matter of changing the depth -values to query for great-grandchildren and similar relations. - - - \ No newline at end of file diff --git a/Documentation/Books/AQL/Views/ArangoSearch/README.md b/Documentation/Books/AQL/Views/ArangoSearch/README.md deleted file mode 100644 index 416692900afd..000000000000 --- a/Documentation/Books/AQL/Views/ArangoSearch/README.md +++ /dev/null @@ -1,653 +0,0 @@ -ArangoSearch Views in AQL -========================= - -Views of type `arangosearch` are an integration layer meant to seamlessly -integrate with and natively expose the full power of the -[IResearch library](https://github.com/iresearch-toolkit/iresearch) -to the ArangoDB user. - -They provide the capability to: - -- evaluate together documents located in different collections -- search documents based on AQL boolean expressions and functions -- sort the result set based on how closely each document matched the search - -Overview and Significance -------------------------- - -Looking up documents in an ArangoSearch View is done via the `FOR` keyword: - -```js -FOR doc IN someView - ... -``` - -`FOR` operations over ArangoSearch Views have an additional, optional, `SEARCH` -keyword: - -```js -FOR doc IN someView - SEARCH searchExpression -``` - -ArangoSearch views cannot be used as edge collections in traversals: - -```js -FOR v IN 1..3 ANY startVertex someView /* invalid! */ -``` - -### SEARCH - -`SEARCH` expressions look a lot like `FILTER` operations, but have some noteable -differences. 
- -First of all, filters and functions in `SEARCH`, when applied to documents -_emitted from an ArangoSearch View_, work _only_ on attributes linked in the -view. - -For example, given a collection `myCol` with the following documents: - -```js -[ - { someAttr: 'One', anotherAttr: 'One' }, - { someAttr: 'Two', anotherAttr: 'Two' } -] -``` - -with a view, where `someAttr` is indexed by the following view `myView`: - -```js -{ - "type": "arangosearch", - "links": { - "myCol": { - "fields": { - "someAttr": {} - } - } - } -} -``` - -Then, a search on `someAttr` yields the following result: - -```js -FOR doc IN myView - SEARCH doc.someAttr == 'One' - RETURN doc -``` - -```js -[ { someAttr: 'One', anotherAttr: 'One' } ] -``` - -While a search on `anotherAttr` yields an empty result: - -```js -FOR doc IN myView - SEARCH doc.anotherAttr == 'One' - RETURN doc -``` - -```js -[] -``` - -- This only applies to the expression after the `SEARCH` keyword. -- This only applies to tests regarding documents emitted from a view. Other - tests are not affected. -- In order to use `SEARCH` using all attributes of a linked sources, the special - `includeAllFields` [link property](../../../Manual/Views/ArangoSearch/DetailedOverview.html#link-properties) - was designed. - -### SORT - -The document search via the `SEARCH` keyword and the sorting via the -ArangoSearch functions, namely `BM25()` and `TFIDF()`, are closely intertwined. -The query given in the `SEARCH` expression is not only used to filter documents, -but also is used with the sorting functions to decide which document matches -the query best. Other documents in the view also affect this decision. - -Therefore the ArangoSearch sorting functions can work _only_ on documents -emitted from a view, as both the corresponding `SEARCH` expression and the view -itself are consulted in order to sort the results. - -The `BOOST()` function, described below, can be used to fine-tune the resulting -ranking by weighing sub-expressions in `SEARCH` differently. - -### Arrays and trackListPositions - -Unless [**trackListPositions**](../../../Manual/Views/ArangoSearch/DetailedOverview.html#link-properties) -is set to `true`, which it is not by default, arrays behave differently. Namely -they behave like a disjunctive superposition of their values - this is best -shown with an example. - -With `trackListPositions: false`, which is the default, and given a document -`doc` containing - -```js -{ attr: [ 'valueX', 'valueY', 'valueZ' ] } -``` - -in a `SEARCH` clause, the expression - -```js -doc.attr == 'valueX' -``` - -will be true, as will be - -```js -doc.attr == 'valueY' -``` - -and `== valueZ`. With `trackListPositions: true`, - -```js -doc.attr[0] == 'valueX' -``` - -would work as usual. - -### Comparing analyzed fields - -As described in [value analysis](#arangosearch-value-analysis), when a field is -processed by a specific analyzer, comparison tests are done per word. For -example, given the field `text` is analyzed with `"text_en"` and contains the -string `"a quick brown fox jumps over the lazy dog"`, the following expression -will be true: - -```js -ANALYZER(d.text == 'fox', "text_en") -``` - -Note also, that the words analyzed in the text are stemmed, so this is also -true: - -```js -ANALYZER(d.text == 'jump', "text_en") -``` - -So a comparison will actually test if a word is contained in the text. With -`trackListPositions: false`, this means for arrays if the word is contained in -any element of the array. 
For example, given - -```js -d.text = [ "a quick", "brown fox", "jumps over the", "lazy dog"] -``` - -the following will be true: - -```js -ANALYZER(d.text == 'jump', "text_en") -``` - -ArangoSearch value analysis ---------------------------- - -A concept of value 'analysis' that is meant to break up a given value into -a set of sub-values internally tied together by metadata which influences both -the search and sort stages to provide the most appropriate match for the -specified conditions, similar to queries to web search engines. - -In plain terms this means a user can for example: - -- request documents where the 'body' attribute best matches 'a quick brown fox' -- request documents where the 'dna' attribute best matches a DNA sub sequence -- request documents where the 'name' attribute best matches gender -- etc. (via custom analyzers) - -To a limited degree the concept of 'analysis' is even available in -non-ArangoSearch AQL, e.g. the TOKENS(...) function will utilize the power of -IResearch to break up a value into an AQL array that can be used anywhere in the -AQL query. - -In plain terms this means a user can match a document attribute when its -value matches at least one entry from a set, -e.g. to match docs with 'word == quick' OR 'word == brown' OR 'word == fox' - - FOR doc IN someCollection - FILTER doc.word IN TOKENS('a quick brown fox', 'text_en') - RETURN doc - -ArangoSearch filters --------------------- - -The basic ArangoSearch functionality can be accessed via the `SEARCH` statement -with common AQL filters and operators, e.g.: - -- `AND` -- `OR` -- `NOT` -- `==` -- `<=` -- `>=` -- `<` -- `>` -- `!=` -- `IN ` -- `IN ` - -However, the full power of ArangoSearch is harnessed and exposed via functions, -during both the search and sort stages. - -Note, that `SEARCH` statement, in contrast to `FILTER`, is meant to be treated -as a part of the `FOR` operation, not as an individual statement. - -The supported AQL context functions are: - -### ANALYZER() - -`ANALYZER(searchExpression, analyzer)` - -Override analyzer in a context of **searchExpression** with another one, -denoted by a specified **analyzer** argument, making it available for search -functions. - -- *searchExpression* - any valid search expression -- *analyzer* - string with the analyzer to imbue, i.e. *"text_en"* or one of the - other [available string analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html) - -By default, context contains `Identity` analyzer. - -### BOOST() - -`BOOST(searchExpression, boost)` - -Override boost in a context of **searchExpression** with a specified value, -making it available for scorer functions. - -- *searchExpression* - any valid search expression -- *boost* - numeric boost value - -By default, context contains boost value equal to `1.0`. - -The supported search functions are: - -### EXISTS() - -Note: Will only match values when the specified attribute has been processed -with the link property **storeValues** set to **"id"** (by default it's -**"none"**). - -`EXISTS(doc.someAttr)` - -Match documents **doc** where the attribute **someAttr** exists in the -document. - -This also works with sub-attributes, e.g. - -`EXISTS(doc.someAttr.anotherAttr)` - -as long as the field is processed by the view with **storeValues** not -**none**. - -`EXISTS(doc.someAttr, "analyzer", analyzer)` - -Match documents where **doc.someAttr** exists in the document _and_ was indexed -by the specified **analyzer**. **analyzer** is optional and defaults to the -current context analyzer (e.g. 
specified by `ANALYZER` function). - -`EXISTS(doc.someAttr, type)` - -Match documents where the **doc.someAttr** exists in the document - and is of the specified type. - -- *doc.someAttr* - the path of the attribute to exist in the document -- *analyzer* - string with the analyzer used, i.e. *"text_en"* or one of the - other [available string analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html) -- *type* - data type as string; one of: - - **bool** - - **boolean** - - **numeric** - - **null** - - **string** - -In case if **analyzer** isn't specified, current context analyzer (e.g. -specified by `ANALYZER` function) will be used. - -### PHRASE() - -``` -PHRASE(doc.someAttr, - phrasePart [, skipTokens] [, phrasePart | , phrasePart, skipTokens]* - [, analyzer]) -``` - -Search for a phrase in the referenced attributes. - -The phrase can be expressed as an arbitrary number of *phraseParts* separated by -*skipToken* number of tokens. - -- *doc.someAttr* - the path of the attribute to compare against in the document -- *phrasePart* - a string to search in the token stream; may consist of several - words; will be split using the specified *analyzer* -- *skipTokens* number of words or tokens to treat as wildcards -- *analyzer* - string with the analyzer used, i.e. *"text_en"* or one of the - other [available string analyzers - ](../../../Manual/Views/ArangoSearch/Analyzers.html) - -For example, given a document `doc` containing the text `"Lorem ipsum dolor sit -amet, consectetur adipiscing elit"`, the following expression will be `true`: - -```js -PHRASE(doc.text, "ipsum", 1, "sit", 2, "adipiscing", "text_de") -``` - -Specifying deep attributes like `doc.some.deep.attr` is also allowed. The -attribute has to be processed by the view as specified in the link. - -### STARTS_WITH() - -`STARTS_WITH(doc.someAttr, prefix)` - -Match the value of the **doc.someAttr** that starts with **prefix** - -- *doc.someAttr* - the path of the attribute to compare against in the document -- *prefix* - a string to search at the start of the text - -Specifying deep attributes like `doc.some.deep.attr` is also allowed. The -attribute has to be processed by the view as specified in the link. - -### TOKENS() - -`TOKENS(input, analyzer)` - -Split the **input** string with the help of the specified **analyzer** into an -Array. The resulting Array can i.e. be used in subsequent `FILTER` or `SEARCH` -statements with the **IN** operator. This can be used to better understand how -the specific analyzer is going to behave. -- *input* string to tokenize -- *analyzer* one of the [available string_analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html) - -### MIN_MATCH() - -`MIN_MATCH(searchExpression [, searchExpression]*, minMatchCount)` - -Match documents where at least **minMatchCount** of the specified -**searchExpression**s are satisfied. - -- *searchExpression* - any valid search expression -- *minMatchCount* - minimum number of *searchExpression*s that should be - satisfied - -For example, - -```js -MIN_MATCH(doc.text == 'quick', doc.text == 'brown', doc.text == 'fox', 2) -``` - -if `doc.text`, as analyzed by the current analyzer, contains 2 out of 'quick', -'brown' and 'fox', it will be included as matched one. 
- -### Searching examples - -to match documents which have a 'name' attribute - - FOR doc IN someView SEARCH EXISTS(doc.name) - RETURN doc - -or - - FOR doc IN someView SEARCH EXISTS(doc['name']) - RETURN doc - -to match documents where 'body' was analyzed via the 'text_en' analyzer - - FOR doc IN someView SEARCH EXISTS(doc.body, 'analyzer', 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH EXISTS(doc['body'], 'analyzer', 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH ANALYZER(EXISTS(doc['body'], 'analyzer'), 'text_en') - RETURN doc - -to match documents which have an 'age' attribute of type number - - FOR doc IN someView SEARCH EXISTS(doc.age, 'numeric') - RETURN doc - -or - - FOR doc IN someView SEARCH EXISTS(doc['age'], 'numeric') - RETURN doc - -to match documents where 'description' contains word 'quick' or word -'brown' and has been analyzed with 'text_en' analyzer - - FOR doc IN someView SEARCH ANALYZER(doc.description == 'quick' OR doc.description == 'brown', 'text_en') - RETURN doc - -to match documents where 'description' contains at least 2 of 3 words 'quick', -'brown', 'fox' and has been analyzed with 'text_en' analyzer - - FOR doc IN someView SEARCH ANALYZER( - MIN_MATCH(doc.description == 'quick', doc.description == 'brown', doc.description == 'fox', 2), - 'text_en' - ) - RETURN doc - -to match documents where 'description' contains a phrase 'quick brown' - - FOR doc IN someView SEARCH PHRASE(doc.description, [ 'quick brown' ], 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH PHRASE(doc['description'], [ 'quick brown' ], 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH ANALYZER(PHRASE(doc['description'], [ 'quick brown' ]), 'text_en') - RETURN doc - -to match documents where 'body' contains the phrase consisting of a sequence -like this: -'quick' * 'fox jumps' (where the asterisk can be any single word) - - FOR doc IN someView SEARCH PHRASE(doc.body, [ 'quick', 1, 'fox jumps' ], 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH PHRASE(doc['body'], [ 'quick', 1, 'fox jumps' ], 'text_en') - RETURN doc - -or - - FOR doc IN someView SEARCH ANALYZER(PHRASE(doc['body'], [ 'quick', 1, 'fox jumps' ]), 'text_en') - RETURN doc - -to match documents where 'story' starts with 'In the beginning' - - FOR doc IN someView SEARCH STARTS_WITH(doc.story, 'In the beginning') - RETURN DOC - -or - - FOR doc IN someView SEARCH STARTS_WITH(doc['story'], 'In the beginning') - RETURN DOC - -to watch the analyzer doing its work - - RETURN TOKENS('a quick brown fox', 'text_en') - -to match documents where 'description' best matches 'a quick brown fox' - - FOR doc IN someView SEARCH ANALYZER(doc.description IN TOKENS('a quick brown fox', 'text_en'), 'text_en') - RETURN doc - -ArangoSearch sorting --------------------- - -A major feature of ArangoSearch Views is their capability of sorting results -based on the creation-time search conditions and zero or more sorting functions. -The ArangoSearch sorting functions available are `TFIDF()` and `BM25()`. - -Note: The first argument to any ArangoSearch sorting function is _always_ the -document emitted by a `FOR` operation over an ArangoSearch View. - -Note: An ArangoSearch sorting function is _only_ allowed as an argument to a -`SORT` operation. But they can be mixed with other arguments to `SORT`. 
- -So the following examples are valid: - -```js -FOR doc IN someView - SORT TFIDF(doc) -``` - -```js -FOR a IN viewA - FOR b IN viewB - SORT BM25(a), TFIDF(b) -``` - -```js -FOR a IN viewA - FOR c IN someCollection - FOR b IN viewB - SORT TFIDF(b), c.name, BM25(a) -``` - -while these will _not_ work: - -```js -FOR doc IN someCollection - SORT TFIDF(doc) // !!! Error -``` -```js -FOR doc IN someCollection - RETURN BM25(doc) // !!! Error -``` -```js -FOR doc IN someCollection - SORT BM25(doc.someAttr) // !!! Error -``` -```js -FOR doc IN someView - SORT TFIDF("someString") // !!! Error -``` -```js -FOR doc IN someView - SORT BM25({some: obj}) // !!! Error -``` - -The following sorting methods are available: - -### Literal sorting -You can sort documents by simply specifying arbitrary values or expressions, as -you do in other places. - -### BM25() - -`BM25(doc, k, b)` - -- *k* (number, _optional_): calibrates the text term frequency scaling, the default is -_1.2_. A *k* value of _0_ corresponds to a binary model (no term frequency), and a large -value corresponds to using raw term frequency -- *b* (number, _optional_): determines the scaling by the total text length, the default -is _0.75_. At the extreme values of the coefficient *b*, BM25 turns into ranking -functions known as BM11 (for *b* = `1`, corresponds to fully scaling the term weight by -the total text length) and BM15 (for *b* = `0`, corresponds to no length normalization) - -Sorts documents using the [**Best Matching 25** algorithm](https://en.wikipedia.org/wiki/Okapi_BM25). -See the [`BM25()` section in ArangoSearch Scorers](../../../Manual/Views/ArangoSearch/Scorers.html) -for details. - -### TFIDF() - -`TFIDF(doc, withNorms)` - -- *doc* (document): must be emitted by `FOR doc IN someView` -- *withNorms* (bool, _optional_): specifying whether scores should be - normalized, the default is _false_ - -Sorts documents using the -[**term frequency–inverse document frequency** algorithm](https://en.wikipedia.org/wiki/TF-IDF). -See the -[`TFIDF()` section in ArangoSearch Scorers](../../../Manual/Views/ArangoSearch/Scorers.html) -for details. 
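Putting search and scoring together, a typical relevance query combines a `SEARCH` expression with a scorer in `SORT` and a `LIMIT`. The following is a sketch assembled only from constructs shown above; the view, attribute and analyzer names are placeholders:

```js
FOR doc IN someView
  SEARCH ANALYZER(doc.description IN TOKENS('a quick brown fox', 'text_en'), 'text_en')
  SORT BM25(doc) DESC
  LIMIT 10
  RETURN doc
```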
- - -### Sorting examples - -to sort documents by the value of the 'name' attribute - - FOR doc IN someView - SORT doc.name - RETURN doc - -or - - FOR doc IN someView - SORT doc['name'] - RETURN doc - -to sort documents via the -[BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25) - - FOR doc IN someView - SORT BM25(doc) - RETURN doc - -to sort documents via the -[BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25) -with 'k' = 1.2 and 'b' = 0.75 - - FOR doc IN someView - SORT BM25(doc, 1.2, 0.75) - RETURN doc - -to sort documents via the -[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) - - FOR doc IN someView - SORT TFIDF(doc) - RETURN doc - -to sort documents via the -[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) with norms - - FOR doc IN someView - SORT TFIDF(doc, true) - RETURN doc - -to sort documents by value of 'name' and then by the -[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) where 'name' values are -equivalent - - FOR doc IN someView - SORT doc.name, TFIDF(doc) - RETURN doc - - -Use cases ---------- - -### Prefix search - -The data contained in our view looks like that: - -```json -{ "id": 1, "body": "ThisIsAVeryLongWord" } -{ "id": 2, "body": "ThisIsNotSoLong" } -{ "id": 3, "body": "ThisIsShorter" } -{ "id": 4, "body": "ThisIs" } -{ "id": 5, "body": "ButNotThis" } -``` - -We now want to search for documents where the attribute `body` starts with "ThisIs", - -A simple AQL query executing this prefix search: - - FOR doc IN someView SEARCH STARTS_WITH(doc.body, 'ThisIs') - RETURN doc - -It will find the documents with the ids `1`, `2`, `3`, `4`, but not `5`. diff --git a/Documentation/Books/AQL/Views/README.md b/Documentation/Books/AQL/Views/README.md deleted file mode 100644 index 1e8f19d78db3..000000000000 --- a/Documentation/Books/AQL/Views/README.md +++ /dev/null @@ -1,37 +0,0 @@ -Views in AQL -============ - -Conceptually a **view** is just another document data source, similar to an -array or a document/edge collection, e.g.: - -```js -FOR doc IN exampleView SEARCH ... - FILTER ... - SORT ... - RETURN ... -``` - -Other than collections, views have an additional but optional `SEARCH` keyword: - -```js -FOR doc IN exampleView - SEARCH ... - FILTER ... - SORT ... - RETURN ... -``` - -A view is meant to be an abstraction over a transformation applied to documents -of zero or more collections. The transformation is view-implementation specific -and may even be as simple as an identity transformation thus making the view -represent all documents available in the specified set of collections. - -Views can be defined and administered on a per view-type basis via -the [web interface](../../Manual/Programs/WebInterface/index.html). - -Currently there is a single supported view implementation, namely -`arangosearch` as described in [ArangoSearch View](ArangoSearch/README.md). - -Also see the detailed -[ArangoSearch tutorial](https://www.arangodb.com/tutorials/arangosearch/) -to learn more. 
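For completeness, an `arangosearch` view can presumably also be created and linked to a collection from arangosh; a sketch assuming a collection named `myCol` (the web interface works just as well):

```js
var view = db._createView("exampleView", "arangosearch", {});
view.properties({ links: { myCol: { includeAllFields: true } } });
```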
diff --git a/Documentation/Books/AQL/book.json b/Documentation/Books/AQL/book.json deleted file mode 100644 index d6e5303842ba..000000000000 --- a/Documentation/Books/AQL/book.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "gitbook": "^3.2.2", - "title": "ArangoDB VERSION_NUMBER AQL Documentation", - "version": "VERSION_NUMBER", - "author": "ArangoDB GmbH", - "description": "Official AQL manual for ArangoDB - the native multi-model NoSQL database", - "language": "en", - "plugins": [ - "-search", - "-lunr", - "-sharing", - "toggle-chapters", - "addcssjs", - "anchorjs", - "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git", - "ga", - "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git", - "edit-link", - "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git", - "localized-footer" - ], - "pdf": { - "fontSize": 12, - "toc": true, - "margin": { - "right": 60, - "left": 60, - "top": 35, - "bottom": 35 - } - }, - "styles": { - "website": "styles/website.css" - }, - "pluginsConfig": { - "addcssjs": { - "js": ["styles/header.js", "styles/hs.js"], - "css": ["styles/header.css"] - }, - "sitemap-general": { - "prefix": "https://docs.arangodb.com/devel/AQL/", - "changefreq": "@GCHANGE_FREQ@", - "priority": @GPRIORITY@ - }, - "ga": { - "token": "UA-81053435-2" - }, - "edit-link": { - "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/AQL", - "label": "Edit Page" - }, - "localized-footer": { - "filename": "FOOTER.html" - } - } -} diff --git a/Documentation/Books/AQL/styles/header.css b/Documentation/Books/AQL/styles/header.css deleted file mode 100644 index 4ec87c77b0e5..000000000000 --- a/Documentation/Books/AQL/styles/header.css +++ /dev/null @@ -1,305 +0,0 @@ -/* Design fix because of the header */ -@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700); - -body { - overflow: hidden; - font-family: Roboto, Helvetica, sans-serif; - background: #444444; -} - -.book .book-header h1 a, .book .book-header h1 a:hover { - display: none; -} - -/* GOOGLE START */ - -.google-search #gsc-iw-id1{ - border: none !important; -} - -.google-search .gsst_b { - position: relative; - top: 10px; - left: -25px; - width: 1px; -} - -.gsst_a .gscb_a { - color: #c01a07 !important; -} - -.google-search input { - background-color: #fff !important; - font-family: Roboto, Helvetica, sans-serif; - font-size: 10pt !important; - padding-left: 5px !important; - float: right; - position: relative; - top: 8px; - width: 100% !important; - height: 30px !important; -} - -.google-search input:active { -} - -.google-search { - margin-right: 10px; - margin-left: 10px !important; - float: right !important; -} - -.google-search td, -.google-search table, -.google-search tr, -.google-search th { - background-color: #444444 !important; -} - -.google-search .gsc-input-box, -.google-search .gsc-input-box input { - border-radius: 3px !important; - width: 200px; -} - -.gsc-branding-text, -.gsc-branding-img, -.gsc-user-defined-text { - display: none !important; -} - -.google-search .gsc-input-box input { - font-size: 16px !important; -} - -.google-search .gsc-search-button { - display: none !important; -} - -.google-search .gsc-control-cse { - padding: 10px !important; -} - -.google-search > div { - float: left !important; - width: 200px !important; -} - -/* GOOGLE END */ - -.book-summary, -.book-body { - margin-top: 48px; -} - -.arangodb-logo, .arangodb-logo-small { - display: inline; - float: left; - padding-top: 12px; - margin-left: 10px; -} 
- -.arangodb-logo img { - height: 23px; -} - -.arangodb-logo-small { - display: none; -} - -.arangodb-version-switcher { - width: 65px; - height: 44px; - margin-left: 16px; - float: left; - display: inline; - font-weight: bold; - color: #fff; - background-color: inherit; - border: 0; -} - -.arangodb-version-switcher option { - background-color: white; - color: black; -} - - -.arangodb-header { - position: fixed; - width: 100%; - height: 48px; - z-index: 1; -} - -.arangodb-header .socialIcons-googlegroups a img { - position: relative; - height: 14px; - top: 3px; -} - -.arangodb-navmenu { - display: block; - float: right; - margin: 0; - padding: 0; -} - -.arangodb-navmenu li { - display: block; - float: left; -} - -.arangodb-navmenu li a { - display: block; - float: left; - padding: 0 10px; - line-height: 48px; - font-size: 16px; - font-weight: 400; - color: #fff; - text-decoration: none; - font-family: Roboto, Helvetica, sans-serif; -} - -.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover { - background-color: #88A049 !important; -} - -.downloadIcon { - margin-right: 10px; -} - -/** simple responsive updates **/ - -@media screen and (max-width: 1000px) { - .arangodb-navmenu li a { - padding: 0 6px; - } - - .arangodb-logo { - margin-left: 10px; - } - - .google-search { - margin-right: 5px !important; - } - - .downloadIcon { - margin-right: 0; - } - - .socialIcons { - display: none !important; - } -} - - -@media screen and (max-width: 800px) { - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 130px !important; - } - - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-logo { - display: none; - } - - .arangodb-logo-small { - display: inline; - margin-left: 10px; - } - - .arangodb-logo-small img { - height: 20px; - } - - .arangodb-version-switcher { - margin: 0; - } - -} - -@media screen and (max-width: 600px) { - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-version-switcher, - .downloadIcon { - display: none !important; - } - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 24px !important; - } - - .google-search .gsc-input-box input[style] { - background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important; - } - - .google-search .gsc-input-box input:focus { - width: 200px !important; - position: relative; - left: -176px; - background-position: -9999px -9999px !important; - } - -} - -@media screen and (max-width: 400px) { - .arangodb-navmenu li a { - font-size: 13px; - padding: 0 5px; - } - .google-search { - display: none; - } -} - -/*Hubspot Cookie notice */ - -body div#hs-eu-cookie-confirmation { - bottom: 0; - top: auto; - position: fixed; - text-align: center !important; -} - -body div#hs-eu-cookie-confirmation.can-use-gradients { - background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75)); -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner { - display: inline-block; - padding: 15px 18px 0; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area { - float: left; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button { - background-color: #577138 !important; - border: none !important; - text-shadow: none !important; - box-shadow: none; - padding: 5px 15px !important; - margin-left: 10px; -} - 
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p { - float: left; - color: #000 !important; - text-shadow: none; -} diff --git a/Documentation/Books/AQL/styles/header.js b/Documentation/Books/AQL/styles/header.js deleted file mode 100644 index b14877f2357c..000000000000 --- a/Documentation/Books/AQL/styles/header.js +++ /dev/null @@ -1,161 +0,0 @@ -// Try to set the version number early, jQuery not available yet -var searcheable_versions = [@BROWSEABLE_VERSIONS@]; -var cx = '@GSEARCH_ID@'; - -document.addEventListener("DOMContentLoaded", function(event) { - if (!gitbook.state.root) return; - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = document.getElementsByClassName("arangodb-version-switcher")[0]; - if (bookVersion) { - switcher.value = bookVersion[1]; - } else { - switcher.style.display = "none"; - } -}); - -window.onload = function(){ -window.localStorage.removeItem(":keyword"); - -$(document).ready(function() { - -function appendHeader() { - var VERSION_SELECTOR = "" - var i = 0; - var prefix; - for (i = 0; i < searcheable_versions.length; i++ ) { - if (searcheable_versions[i] === 'devel') { - prefix = ''; - } else { - prefix = 'v'; - } - VERSION_SELECTOR += '\n'; - } - - var div = document.createElement('div'); - div.innerHTML = '
\n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n'; - - $('.book').before(div.innerHTML); - - }; - - - function rerenderNavbar() { - $('#header').remove(); - appendHeader(); - }; - - //render header - rerenderNavbar(); - function addGoogleSrc() { - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//cse.google.com/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - }; - addGoogleSrc(); - - $(".arangodb-navmenu a[data-book]").on("click", function(e) { - e.preventDefault(); - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - urlSplit.pop(); // e.g. "Manual" - window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html"; - }); - - // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*) - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = $(".arangodb-version-switcher"); - if (bookVersion) { - switcher.val(bookVersion[1]); - } else { - switcher.hide(); - } - - $(".arangodb-version-switcher").on("change", function(e) { - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - var currentBook = urlSplit.pop(); // e.g. "Manual" - urlSplit.pop() // e.g. "3.0" - if (e.target.value == "2.8") { - var legacyMap = { - "Manual": "", - "AQL": "/Aql", - "HTTP": "/HttpApi", - "Cookbook": "/Cookbook" - }; - currentBook = legacyMap[currentBook]; - } else { - currentBook = "/" + currentBook; - } - window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html"; - }); - -}); - -}; diff --git a/Documentation/Books/AQL/styles/hs.js b/Documentation/Books/AQL/styles/hs.js deleted file mode 100644 index 9a8ae18a61d2..000000000000 --- a/Documentation/Books/AQL/styles/hs.js +++ /dev/null @@ -1,33 +0,0 @@ -// HubSpot Script Loader. Please do not block this resource. 
See more: http://hubs.ly/H0702_H0 - -(function (id, src, attrs) { - if (document.getElementById(id)) { - try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); } - finally { return; } - } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } } - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"}); - -(function (id, src) { - if (document.getElementById(id)) { return; } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js'); - -window.setTimeout(function () { - $('body').on('click', 'a', function () { - var _hsq = window._hsq = window._hsq || []; - _hsq.push(['setPath', window.location.pathname]); - _hsq.push(['trackPageView']); - }); -}, 1000); diff --git a/Documentation/Books/AQL/styles/website.css b/Documentation/Books/AQL/styles/website.css deleted file mode 100644 index 0bbc2f1eff37..000000000000 --- a/Documentation/Books/AQL/styles/website.css +++ /dev/null @@ -1,84 +0,0 @@ -.markdown-section small { - font-size: 80%; -} -.markdown-section sub, .markdown-section sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -.markdown-section sup { - top: -.5em; -} -.markdown-section sub { - bottom: -.25em; -} - -div.example_show_button { - border: medium solid lightgray; - text-align: center; - position: relative; - top: -10px; - display: flex; - justify-content: center; -} - -.book .book-body .navigation.navigation-next { - right: 10px !important; -} - -.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover { - color: #fff !important; - background: #80A54D !important; - text-decoration: none; -} - -.book .book-body .page-wrapper .page-inner section.normal .deprecated{ - background-color: rgba(240,240,0,0.4); -} - -.book .book-body section > ul li:last-child { - margin-bottom: 0.85em; -} - -.book .book-body .alert p:last-child { - margin-bottom: 0; -} - -.columns-3 { - -webkit-column-count: 3; - -moz-column-count: 3; - -ms-column-count: 3; - -o-column-count: 3; - column-count: 3; - columns: 3; -} - -.localized-footer { - opacity: 0.5; -} - -.example-container { - position: relative; -} - -.example-container a.anchorjs-link { - position: absolute; - top: 10px; - right: 10px; - font: 1em/1 anchorjs-icons; -} - -.gsib_a { -padding: 0px !important; -} - -.gsc-control-cse { -border: 0px !important; -background-color: transparent !important; -} - - -.gsc-input { -margin: 0px !important; -} diff --git a/Documentation/Books/Cookbook/.gitkeep b/Documentation/Books/Cookbook/.gitkeep new file mode 100644 index 000000000000..936ca3adc4e3 --- /dev/null +++ b/Documentation/Books/Cookbook/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the directory is kept. + +Some of the old documentation building scripts are still +used by the new system which copy files into this folder. 
\ No newline at end of file diff --git a/Documentation/Books/Cookbook/AQL/AvoidingInjection.md b/Documentation/Books/Cookbook/AQL/AvoidingInjection.md deleted file mode 100644 index 9e1f45d509f1..000000000000 --- a/Documentation/Books/Cookbook/AQL/AvoidingInjection.md +++ /dev/null @@ -1,288 +0,0 @@ -Avoiding parameter injection in AQL -=================================== - -Problem -------- - -I don't want my AQL queries to be affected by parameter injection. - -### What is parameter injection? - -Parameter injection means that potentially content is inserted into a query, -and that injection may change the meaning of the query. It is a security issue -that may allow an attacker to execute arbitrary queries on the database data. - -It often occurs if applications trustfully insert user-provided inputs into a -query string, and do not fully or incorrectly filter them. It also occurs often -when applications build queries naively, without using security mechanisms often -provided by database software or querying mechanisms. - -Parameter injection examples ----------------------------- - -Assembling query strings with simple string concatenation looks trivial, -but is potentially unsafe. Let's start with a simple query that's fed with some -dynamic input value, let's say from a web form. A client application or a Foxx -route happily picks up the input value, and puts it into a query: - -```js -/* evil ! */ -var what = req.params("searchValue"); /* user input value from web form */ -... -var query = "FOR doc IN collection FILTER doc.value == " + what + " RETURN doc"; -db._query(query, params).toArray(); -``` - -The above will probably work fine for numeric input values. - -What could an attacker do to this query? Here are a few suggestions to use for the -`searchValue` parameter: - -- for returning all documents in the collection: `1 || true` -- for removing all documents: `1 || true REMOVE doc IN collection //` -- for inserting new documents: `1 || true INSERT { foo: "bar" } IN collection //` - -It should have become obvious that this is extremely unsafe and should be avoided. - -An pattern often seen to counteract this is trying to quote and escape potentially -unsafe input values before putting them into query strings. This may work in some situations, -but it's easy to overlook something or get it subtly wrong: - -```js -/* we're sanitzing now, but it's still evil ! */ -var value = req.params("searchValue").replace(/'/g, ''); -... -var query = "FOR doc IN collection FILTER doc.value == '" + value + "' RETURN doc"; -db._query(query, params).toArray(); -``` - -The above example uses single quotes for enclosing the potentially unsafe user -input, and also replaces all single quotes in the input value beforehand. Not only may -that change the user input (leading to subtle errors such as *"why does my search for -`O'Brien` don't return any results?"*), but it is also unsafe. If the user input contains -a backslash at the end (e.g. `foo bar\`), that backslash will escape the closing single -quote, allowing the user input to break out of the string fence again. - -It gets worse if user input is inserted into the query at multiple places. 
Let's assume -we have a query with two dynamic values: - -```js -query = "FOR doc IN collection FILTER doc.value == '" + value + "' && doc.type == '" + type + "' RETURN doc"; -``` - -If an attacker inserted `\` for parameter `value` and ` || true REMOVE doc IN collection //` for -parameter `type`, then the effective query would become - -``` -FOR doc IN collection FILTER doc.value == '\' && doc.type == ' || true REMOVE doc IN collection //' RETURN doc -``` - -which is highly undesirable. - - -Solution --------- - -Instead of mixing query string fragments with user inputs naively via string -concatenation, use either **bind parameters** or a **query builder**. Both can -help to avoid the problem of injection, because they allow separating the actual -query operations (like `FOR`, `INSERT`, `REMOVE`) from (user input) values. - -This recipe focuses on using bind parameters. This is not to say that query -builders shouldn't be used. They were simply omitted here for the sake of simplicity. -To get started with a using an AQL query builder in ArangoDB or other JavaScript -environments, have a look at [aqb](https://www.npmjs.com/package/aqb) (which comes -bundled with ArangoDB). Inside ArangoDB, there are also [Foxx queries](../../Manual/Foxx/index.html) -which can be combined with aqb. - -### What bind parameters are - -Bind parameters in AQL queries are special tokens that act as placeholders for -actual values. Here's an example: - -``` -FOR doc IN collection - FILTER doc.value == @what - RETURN doc -``` - -In the above query, `@what` is a bind parameter. In order to execute this query, -a value for bind parameter `@what` must be specified. Otherwise query execution will -fail with error 1551 (*no value specified for declared bind parameter*). If a value -for `@what` gets specified, the query can be executed. However, the query string -and the bind parameter values (i.e. the contents of the `@what` bind parameter) will -be handled separately. What's in the bind parameter will always be treated as a value, -and it can't get out of its sandbox and change the semantic meaning of a query. - -### How bind parameters are used - -To execute a query with bind parameters, the query string (containing the bind -parameters) and the bind parameter values are specified separately (note that when -the bind parameter value is assigned, the prefix `@` needs to be omitted): - -```js -/* query string with bind parameter */ -var query = "FOR doc IN collection FILTER doc.value == @what RETURN doc"; - -/* actual value for bind parameter */ -var params = { what: 42 }; - -/* run query, specifying query string and bind parameter separately */ -db._query(query, params).toArray(); -``` - -If a malicious user would set `@what` to a value of `1 || true`, this wouldn't do -any harm. AQL would treat the contents of `@what` as a single string token, and -the meaning of the query would remain unchanged. The actually executed query would be: - -``` -FOR doc IN collection - FILTER doc.value == "1 || true" - RETURN doc -``` - -Thanks to bind parameters it is also impossible to turn a selection (i.e. read-only) -query into a data deletion query. - -### Using JavaScript variables as bind parameters - -There is also a template string generator function `aql` that can be used to safely -(and conveniently) built AQL queries using JavaScript variables and expressions. 
It -can be invoked as follows: - -```js -const aql = require('@arangodb').aql; // not needed in arangosh - -var value = "some input value"; -var query = aql`FOR doc IN collection - FILTER doc.value == ${value} - RETURN doc`; -var result = db._query(query).toArray(); -``` - -Note that an ES6 template string is used for populating the `query` variable. The -string is assembled using the `aql` generator function which is bundled with -ArangoDB. The template string can contain references to JavaScript variables or -expressions via `${...}`. In the above example, the query references a variable -named `value`. The `aql` function generates an object with two separate -attributes: the query string, containing references to bind parameters, and the actual -bind parameter values. - -Bind parameter names are automatically generated by the `aql` function: - -```js -var value = "some input value"; -aql`FOR doc IN collection FILTER doc.value == ${value} RETURN doc`; - -{ - "query" : "FOR doc IN collection FILTER doc.value == @value0 RETURN doc", - "bindVars" : { - "value0" : "some input value" - } -} -``` - -### Using bind parameters in dynamic queries - -Bind parameters are helpful, so it makes sense to use them for handling the dynamic values. -You can even use them for queries that itself are highly dynamic, for example with conditional -`FILTER` and `LIMIT` parts. Here's how to do this: - -```js -/* note: this example has a slight issue... hang on reading */ -var query = "FOR doc IN collection"; -var params = { }; - -if (useFilter) { - query += " FILTER doc.value == @what"; - params.what = req.params("searchValue"); -} - -if (useLimit) { - /* not quite right, see below */ - query += " LIMIT @offset, @count"; - params.offset = req.params("offset"); - params.count = req.params("count"); -} - -query += " RETURN doc"; -db._query(query, params).toArray(); -``` - -Note that in this example we're back to string concatenation, but without the problem of -the query being vulnerable to arbitrary modifications. - -### Input value validation and sanitation - -Still you should prefer to be paranoid, and try to detect invalid input values as early as -possible, at least before executing a query with them. This is because some input parameters -may affect the runtime behavior of queries negatively or, when modified, may lead to queries -throwing runtime errors instead of returning valid results. This isn't something an attacker -should deserve. - -`LIMIT` is a good example for this: if used with a single argument, the argument should -be numeric. When `LIMIT` is given a string value, executing the query will fail. You -may want to detect this early and don't return an HTTP 500 (as this would signal attackers -that they were successful breaking your application). - -Another problem with `LIMIT` is that high `LIMIT` values are likely more expensive than low -ones, and you may want to disallow using `LIMIT` values exceeding a certain threshold. - -Here's what you could do in such cases: - -```js -var query = "FOR doc IN collection LIMIT @count RETURN doc"; - -/* some default value for limit */ -var params = { count: 100 }; - -if (useLimit) { - var count = req.params("count"); - - /* abort if value does not look like an integer */ - if (! 
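/* note: preg_match() is PHP-style pseudo-code and the character class below
   is missing its backslash; in plain JavaScript this check could be written
   as  /^\d+$/.test(count)  instead */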
preg_match(/^d+$/, count)) { - throw "invalid count value!"; - } - - /* actually turn it into an integer */ - params.count = parseInt(count, 10); // turn into numeric value -} - -if (params.count < 1 || params.count > 1000) { - /* value is outside of accepted thresholds */ - throw "invalid count value!"; -} - -db._query(query, params).toArray(); -``` - -This is a bit more complex, but that's a price you're likely willing to pay for a -bit of extra safety. In reality you may want to use a framework for validation (such as -[joi](https://www.npmjs.com/package/joi) which comes bundled with ArangoDB) instead -of writing your own checks all over the place. - -### Bind parameter types - -There are two types of bind parameters in AQL: - -- bind parameters for values: those are prefixed with a single `@` in AQL queries, and - are specified without the prefix when they get their value assigned. These bind parameters - can contain any valid JSON value. - - Examples: `@what`, `@searchValue` - -- bind parameters for collections: these are prefixed with `@@` in AQL queries, and are - replaced with the name of a collection. When the bind parameter value is assigned, the - parameter itself must be specified with a single `@` prefix. Only string values are allowed - for this type of bind parameters. - - Examples: `@@collection` - -The latter type of bind parameter is probably not used as often, and it should not be used -together with user input. Otherwise users may freely determine on which collection your -AQL queries will operate (note: this may be a valid use case, but normally it is extremely -undesired). - -**Authors**: [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #injection #aql #security diff --git a/Documentation/Books/Cookbook/AQL/CreatingTestData.md b/Documentation/Books/Cookbook/AQL/CreatingTestData.md deleted file mode 100644 index f7b338b1bf08..000000000000 --- a/Documentation/Books/Cookbook/AQL/CreatingTestData.md +++ /dev/null @@ -1,92 +0,0 @@ -Creating test data with AQL -=========================== - -Problem -------- - -I want to create some test documents. - -Solution --------- - -If you haven't yet created a collection to hold the documents, create one now using the -ArangoShell: - -```js -db._create("myCollection"); -``` - -This has created a collection named *myCollection*. - -One of the easiest ways to fill a collection with test data is to use an AQL query that -iterates over a range. - -Run the following AQL query from the **AQL editor** in the web interface to insert 1,000 -documents into the just created collection: - -``` -FOR i IN 1..1000 - INSERT { name: CONCAT("test", i) } IN myCollection -``` - -The number of documents to create can be modified easily be adjusting the range boundary -values. - -To create more complex test data, adjust the AQL query! - -Let's say we also want a `status` attribute, and fill it with integer values between `1` to -(including) `5`, with equal distribution. A good way to achieve this is to use the modulo -operator (`%`): - -``` -FOR i IN 1..1000 - INSERT { - name: CONCAT("test", i), - status: 1 + (i % 5) - } IN myCollection -``` - -To create pseudo-random values, use the `RAND()` function. It creates pseudo-random numbers -between 0 and 1. Use some factor to scale the random numbers, and `FLOOR()` to convert the -scaled number back to an integer. 
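The pattern used in the example below for drawing an integer from the inclusive range `[min, max]` is (with `min` and `max` standing in for concrete numbers):

```
min + FLOOR(RAND() * (max - min + 1))
```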
- -For example, the following query populates the `value` attribute with numbers between 100 and -150 (including): - -``` -FOR i IN 1..1000 - INSERT { - name: CONCAT("test", i), - value: 100 + FLOOR(RAND() * (150 - 100 + 1)) - } IN myCollection -``` - -After the test data has been created, it is often helpful to verify it. The -`RAND()` function is also a good candidate for retrieving a random sample of the documents in the -collection. This query will retrieve 10 random documents: - -``` -FOR doc IN myCollection - SORT RAND() - LIMIT 10 - RETURN doc -``` - -The `COLLECT` clause is an easy mechanism to run an aggregate analysis on some attribute. Let's -say we wanted to verify the data distribution inside the `status` attribute. In this case we -could run: - -``` -FOR doc IN myCollection - COLLECT value = doc.value WITH COUNT INTO count - RETURN { - value: value, - count: count - } -``` - -The above query will provide the number of documents per distinct `value`. - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #aql diff --git a/Documentation/Books/Cookbook/AQL/DiffingDocuments.md b/Documentation/Books/Cookbook/AQL/DiffingDocuments.md deleted file mode 100644 index 32741846d47d..000000000000 --- a/Documentation/Books/Cookbook/AQL/DiffingDocuments.md +++ /dev/null @@ -1,126 +0,0 @@ -Diffing Two Documents in AQL -============================ - -Problem -------- - -How to create a `diff` of documents in AQL - -Solution --------- - -Though there is no built-in AQL function to `diff` two documents, it is easily possible to build your own like in the following query: - -``` -/* input document 1*/ -LET doc1 = { - "foo" : "bar", - "a" : 1, - "b" : 2 -} - -/* input document 2 */ -LET doc2 = { - "foo" : "baz", - "a" : 2, - "c" : 3 -} - -/* collect attributes present in doc1, but missing in doc2 */ -LET missing = ( - FOR key IN ATTRIBUTES(doc1) - FILTER ! HAS(doc2, key) - RETURN { - [ key ]: doc1[key] - } -) - -/* collect attributes present in both docs, but that have different values */ -LET changed = ( - FOR key IN ATTRIBUTES(doc1) - FILTER HAS(doc2, key) && doc1[key] != doc2[key] - RETURN { - [ key ] : { - old: doc1[key], - new: doc2[key] - } - } -) - -/* collect attributes present in doc2, but missing in doc1 */ -LET added = ( - FOR key IN ATTRIBUTES(doc2) - FILTER ! HAS(doc1, key) - RETURN { - [ key ] : doc2[key] - } -) - -/* return final result */ -RETURN { - "missing" : missing, - "changed" : changed, - "added" : added -} -``` - -**Note**: The query may look a bit lengthy, but much of that is due to formatting. A more terse version can be found below. - -The above query will return a document with three attributes: - -- _missing_: Contains all attributes only present in first document (i.e. missing in second document) -- _changed_: Contains all attributes present in both documents that have different values -- _added_: Contains all attributes only present in second document (i.e. missing in first document) - -For the two example documents it will return: - -```json -[ - { - "missing" : [ - { - "b" : 2 - } - ], - "changed" : [ - { - "foo" : { - "old" : "bar", - "new" : "baz" - } - }, - { - "a" : { - "old" : 1, - "new" : 2 - } - } - ], - "added" : [ - { - "c" : 3 - } - ] - } -] -``` - - -That output format was the first that came to my mind. It is of course possible to adjust the query so it produces a different output format. - -Following is a version of the same query that can be invoked from JavaScript easily. 
It passes the two documents as bind parameters and calls `db._query`. The query is now an one-liner (less readable but easier to copy&paste): - -```js -bindVariables = { - doc1 : { "foo" : "bar", "a" : 1, "b" : 2 }, - doc2 : { "foo" : "baz", "a" : 2, "c" : 3 } -}; - -query = "LET doc1 = @doc1, doc2 = @doc2, missing = (FOR key IN ATTRIBUTES(doc1) FILTER ! HAS(doc2, key) RETURN { [ key ]: doc1[key] }), changed = (FOR key IN ATTRIBUTES(doc1) FILTER HAS(doc2, key) && doc1[key] != doc2[key] RETURN { [ key ] : { old: doc1[key], new: doc2[key] } }), added = (FOR key IN ATTRIBUTES(doc2) FILTER ! HAS(doc1, key) RETURN { [ key ] : doc2[key] }) RETURN { missing : missing, changed : changed, added : added }"; - -result = db._query(query, bindVariables).toArray(); -``` - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #howto #aql diff --git a/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md b/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md deleted file mode 100644 index 16d47be27897..000000000000 --- a/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md +++ /dev/null @@ -1,197 +0,0 @@ -Using dynamic attribute names in AQL -==================================== - -Problem -------- - -I want an AQL query to return results with attribute names assembled by a function, -or with a variable number of attributes. - -This will not work by specifying the result using a regular object literal, as object -literals require the names and numbers of attributes to be fixed at query compile time. - -Solution --------- - -There are several solutions to getting dynamic attribute names to work. - -### Subquery solution - -A general solution is to let a subquery or another function to produce the dynamic -attribute names, and finally pass them through the `ZIP()` function to create an object -from them. - -Let's assume we want to process the following input documents: - -```json -{ "name" : "test", "gender" : "f", "status" : "active", "type" : "user" } -{ "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 } -``` - -Let's also assume our goal for each of these documents is to return only the attribute -names that contain the letter `a`, together with their respective values. - -To extract the attribute names and values from the original documents, we can use a subquery -as follows: - -``` -LET documents = [ - { "name" : "test"," gender" : "f", "status" : "active", "type" : "user" }, - { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 } -] - -FOR doc IN documents - RETURN ( - FOR name IN ATTRIBUTES(doc) - FILTER LIKE(name, '%a%') - RETURN { - name: name, - value: doc[name] - } - ) -``` - -The subquery will only let attribute names pass that contain the letter `a`. The results -of the subquery are then made available to the main query and will be returned. But the -attribute names in the result are still `name` and `value`, so we're not there yet. 
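To illustrate, the intermediate result of the query above looks roughly like this, with one array of name/value pairs per input document (attribute order may vary):

```json
[
  [
    { "name": "name", "value": "test" },
    { "name": "status", "value": "active" }
  ],
  [
    { "name": "name", "value": "dummy" },
    { "name": "status", "value": "inactive" },
    { "name": "magicFlag", "value": 23 }
  ]
]
```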
- -So let's also employ AQL's `ZIP()` function, which can create an object from two arrays: - -* the first parameter to `ZIP()` is an array with the attribute names -* the second parameter to `ZIP()` is an array with the attribute values - -Instead of directly returning the subquery result, we first capture it in a variable, and -pass the variable's `name` and `value` components into `ZIP()` like this: - -``` -LET documents = [ - { "name" : "test"," gender" : "f", "status" : "active", "type" : "user" }, - { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 } -] - -FOR doc IN documents - LET attributes = ( - FOR name IN ATTRIBUTES(doc) - FILTER LIKE(name, '%a%') - RETURN { - name: name, - value: doc[name] - } - ) - RETURN ZIP(attributes[*].name, attributes[*].value) -``` - -Note that we have to use the expansion operator (`[*]`) on `attributes` because `attributes` -itself is an array, and we want either the `name` attribute or the `value` attribute of each -of its members. - -To prove this is working, here is the above query's result: - -```json -[ - { - "name": "test", - "status": "active" - }, - { - "name": "dummy", - "status": "inactive", - "magicFlag": 23 - } -] -``` - -As can be seen, the two results have a different amount of result attributes. We can also -make the result a bit more dynamic by prefixing each attribute with the value of the `name` -attribute: - -``` -LET documents = [ - { "name" : "test"," gender" : "f", "status" : "active", "type" : "user" }, - { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 } -] - -FOR doc IN documents - LET attributes = ( - FOR name IN ATTRIBUTES(doc) - FILTER LIKE(name, '%a%') - RETURN { - name: CONCAT(doc.name, '-', name), - value: doc[name] - } - ) - RETURN ZIP(attributes[*].name, attributes[*].value) -``` - -That will give us document-specific attribute names like this: - -```json -[ - { - "test-name": "test", - "test-status": "active" - }, - { - "dummy-name": "dummy", - "dummy-status": "inactive", - "dummy-magicFlag": 23 - } -] -``` - -### Using expressions as attribute names (ArangoDB 2.5) - -If the number of dynamic attributes to return is known in advance, and only the attribute names -need to be calculated using an expression, then there is another solution. - -ArangoDB 2.5 and higher allow using expressions instead of fixed attribute names in object literals. -Using expressions as attribute names requires enclosing the expression in extra `[` and `]` to -disambiguate them from regular, unquoted attribute names. - -Let's create a result that returns the original document data contained in a dynamically named -attribute. We'll be using the expression `doc.type` for the attribute name. We'll also return -some other attributes from the original documents, but prefix them with the documents' `_key` -attribute values. For this we also need attribute name expressions. - -Here is a query showing how to do this. 
The attribute name expressions all required to be -enclosed in `[` and `]` in order to make this work: - -``` -LET documents = [ - { "_key" : "3231748397810", "gender" : "f", "status" : "active", "type" : "user" }, - { "_key" : "3231754427122", "gender" : "m", "status" : "inactive", "type" : "unknown" } -] - -FOR doc IN documents - RETURN { - [ doc.type ] : { - [ CONCAT(doc._key, "_gender") ] : doc.gender, - [ CONCAT(doc._key, "_status") ] : doc.status - } - } -``` - -This will return: - -```json -[ - { - "user": { - "3231748397810_gender": "f", - "3231748397810_status": "active" - } - }, - { - "unknown": { - "3231754427122_gender": "m", - "3231754427122_status": "inactive" - } - } -] -``` - -Note: attribute name expressions and regular, unquoted attribute names can be mixed. - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #aql diff --git a/Documentation/Books/Cookbook/AQL/Joins.md b/Documentation/Books/Cookbook/AQL/Joins.md deleted file mode 100644 index ac19c048b83f..000000000000 --- a/Documentation/Books/Cookbook/AQL/Joins.md +++ /dev/null @@ -1,582 +0,0 @@ -Using Joins in AQL -================== - -Problem -------- - -I want to join documents from collections in an AQL query. - -- One-to-Many: I have a collection users and a collection cities. A user lives in a city and I need the city information during the query. - -- Many-To-Many: I have a collection authors and books. An author can write many - books and a book can have many authors. I want to return a list of books with - their authors. Therefore I need to join the authors and books. - -Solution --------- - -Unlike many NoSQL databases, ArangoDB does support joins in AQL queries. This is -similar to the way traditional relational databases handle this. However, -because documents allow for more flexibility, joins are also more flexible. The -following sections provide solutions for common questions. - -### One-To-Many - -You have a collection called users. Users live in city and a city is identified -by its primary key. In principle you can embedded the city document into the users -document and be happy with it. - -```json -{ - "_id" : "users/2151975421", - "_key" : "2151975421", - "_rev" : "2151975421", - "name" : { - "first" : "John", - "last" : "Doe" - }, - "city" : { - "name" : "Metropolis" - } -} -``` - -This works well for many use cases. Now assume, that you have additional -information about the city, like the number of people living in it. It would be -impractical to change each and every user document if this numbers -changes. Therefore it is good idea to hold the city information in a separate -collection. - -```json -arangosh> db.cities.document("cities/2241300989"); -{ - "population" : 1000, - "name" : "Metropolis", - "_id" : "cities/2241300989", - "_rev" : "2241300989", - "_key" : "2241300989" -} -``` - -Now you instead of embedding the city directly in the user document, you can use -the key of the city. - -```json -arangosh> db.users.document("users/2290649597"); -{ - "name" : { - "first" : "John", - "last" : "Doe" - }, - "city" : "cities/2241300989", - "_id" : "users/2290649597", - "_rev" : "2290649597", - "_key" : "2290649597" -} -``` - -We can now join these two collections very easily. 
- -```json -arangosh> db._query( -........>"FOR u IN users " + -........>" FOR c IN cities " + -........>" FILTER u.city == c._id RETURN { user: u, city: c }" -........>).toArray() -[ - { - "user" : { - "name" : { - "first" : "John", - "last" : "Doe" - }, - "city" : "cities/2241300989", - "_id" : "users/2290649597", - "_rev" : "2290649597", - "_key" : "2290649597" - }, - "city" : { - "population" : 1000, - "name" : "Metropolis", - "_id" : "cities/2241300989", - "_rev" : "2241300989", - "_key" : "2241300989" - } - } -] -``` - -Unlike SQL there is no special JOIN keyword. The optimizer ensures that the -primary index is used in the above query. - -However, very often it is much more convenient for the client of the query if a -single document would be returned, where the city information is embedded in the -user document - as in the simple example above. With AQL there you do not need -to forgo this simplification. - -```json -arangosh> db._query( -........>"FOR u IN users " + -........>" FOR c IN cities " + -........>" FILTER u.city == c._id RETURN merge(u, {city: c})" -........>).toArray() -[ - { - "_id" : "users/2290649597", - "_key" : "2290649597", - "_rev" : "2290649597", - "name" : { - "first" : "John", - "last" : "Doe" - }, - "city" : { - "_id" : "cities/2241300989", - "_key" : "2241300989", - "_rev" : "2241300989", - "population" : 1000, - "name" : "Metropolis" - } - } -] -``` - -So you can have both: the convenient representation of the result for your client -and the flexibility of joins for your data model. - -### Many-To-Many - -In the relational word you need a third table to model the many-to-many -relation. In ArangoDB you have a choice depending on the information you are -going to store and the type of questions you are going to ask. - -Assume that authors are stored in one collection and books in a second. If all -you need is "which are the authors of a book" then you can -easily model this as a list attribute in users. - -If you want to store more information, for example which author wrote which page in a conference proceeding, or if you also want to know "which books were written by which author", you can use edge collections. This is very similar to the "join table" from the relational world. - -#### Embedded Lists - -If you only want to store the authors of a book, you can embed them as list in the book document. There is no need for a separate collection. - -```json -arangosh> db.authors.toArray() -[ - { - "_id" : "authors/2661190141", - "_key" : "2661190141", - "_rev" : "2661190141", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - } - }, - { - "_id" : "authors/2658437629", - "_key" : "2658437629", - "_rev" : "2658437629", - "name" : { - "first" : "John", - "last" : "Doe" - } - } -] -``` - -You can query books - -```json -arangosh> db._query("FOR b IN books RETURN b").toArray(); -[ - { - "_id" : "books/2681506301", - "_key" : "2681506301", - "_rev" : "2681506301", - "title" : "The beauty of JOINS", - "authors" : [ - "authors/2661190141", - "authors/2658437629" - ] - } -] -``` - -and join the authors in a very similar manner given in the one-to-many section. 
- -```json -arangosh> db._query( -........>"FOR b IN books " + -........>" LET a = (FOR x IN b.authors " + -........>" FOR a IN authors FILTER x == a._id RETURN a) " + -........>" RETURN { book: b, authors: a }" -........>).toArray(); -[ - { - "book" : { - "title" : "The beauty of JOINS", - "authors" : [ - "authors/2661190141", - "authors/2658437629" - ], - "_id" : "books/2681506301", - "_rev" : "2681506301", - "_key" : "2681506301" - }, - "authors" : [ - { - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - }, - "_id" : "authors/2661190141", - "_rev" : "2661190141", - "_key" : "2661190141" - }, - { - "name" : { - "first" : "John", - "last" : "Doe" - }, - "_id" : "authors/2658437629", - "_rev" : "2658437629", - "_key" : "2658437629" - } - ] - } -] -``` - -or embed the authors directly - -```json -arangosh> db._query( -........>"FOR b IN books LET a = (" + -........>" FOR x IN b.authors " + -........>" FOR a IN authors FILTER x == a._id RETURN a)" + -........>" RETURN merge(b, { authors: a })" -........>).toArray(); -[ - { - "_id" : "books/2681506301", - "_key" : "2681506301", - "_rev" : "2681506301", - "title" : "The beauty of JOINS", - "authors" : [ - { - "_id" : "authors/2661190141", - "_key" : "2661190141", - "_rev" : "2661190141", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - } - }, - { - "_id" : "authors/2658437629", - "_key" : "2658437629", - "_rev" : "2658437629", - "name" : { - "first" : "John", - "last" : "Doe" - } - } - ] - } -] -``` - -#### Using Edge Collections - -If you also want to query which books are written by a given author, embedding authors -in the book document is possible, but it is more efficient to use a edge collections for -speed. - -Or you are publishing a proceeding, then you want to store the pages the author has written -as well. This information can be stored in the edge document. - -First create the users - -```json -arangosh> db._create("authors"); -[ArangoCollection 2926807549, "authors" (type document, status loaded)] - -arangosh> db.authors.save({ name: { first: "John", last: "Doe" } }) -{ - "error" : false, - "_id" : "authors/2935261693", - "_rev" : "2935261693", - "_key" : "2935261693" -} - -arangosh> db.authors.save({ name: { first: "Maxima", last: "Musterfrau" } }) -{ - "error" : false, - "_id" : "authors/2938210813", - "_rev" : "2938210813", - "_key" : "2938210813" -} -``` - -Now create the books without any author information. - -```json -arangosh> db._create("books"); -[ArangoCollection 2928380413, "books" (type document, status loaded)] - -arangosh> db.books.save({ title: "The beauty of JOINS" }); -{ - "error" : false, - "_id" : "books/2980088317", - "_rev" : "2980088317", - "_key" : "2980088317" -} -``` - -An edge collection is now used to link authors and books. 
- -```json -arangosh> db._createEdgeCollection("written"); -[ArangoCollection 2931132925, "written" (type edge, status loaded)] - -arangosh> db.written.save("authors/2935261693", -........>"books/2980088317", -........>{ pages: "1-10" }) -{ - "error" : false, - "_id" : "written/3006237181", - "_rev" : "3006237181", - "_key" : "3006237181" -} - -arangosh> db.written.save("authors/2938210813", -........>"books/2980088317", -........>{ pages: "11-20" }) -{ - "error" : false, - "_id" : "written/3012856317", - "_rev" : "3012856317", - "_key" : "3012856317" -} -``` - -In order to get all books with their authors you can use a [graph -traversal](../../AQL/Graphs/Traversals.html#working-with-collection-sets) - -```json -arangosh> db._query( -...> "FOR b IN books " + -...> "LET authorsByBook = ( " + -...> " FOR author, writtenBy IN INBOUND b written " + -...> " RETURN { " + -...> " vertex: author, " + -...> " edge: writtenBy " + -...> " } " + -...> ") " + -...> "RETURN { " + -...> " book: b, " + -...> " authors: authorsByBook " + -...> "} " -...> ).toArray(); -[ - { - "book" : { - "_key" : "2980088317", - "_id" : "books/2980088317", - "_rev" : "2980088317", - "title" : "The beauty of JOINS" - }, - "authors" : [ - { - "vertex" : { - "_key" : "2935261693", - "_id" : "authors/2935261693", - "_rev" : "2935261693", - "name" : { - "first" : "John", - "last" : "Doe" - } - }, - "edge" : { - "_key" : "2935261693", - "_id" : "written/2935261693", - "_from" : "authors/2935261693", - "_to" : "books/2980088317", - "_rev" : "3006237181", - "pages" : "1-10" - } - }, - { - "vertex" : { - "_key" : "2938210813", - "_id" : "authors/2938210813", - "_rev" : "2938210813", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - } - }, - "edge" : { - "_key" : "6833274", - "_id" : "written/6833274", - "_from" : "authors/2938210813", - "_to" : "books/2980088317", - "_rev" : "3012856317", - "pages" : "11-20" - } - } - ] - } -] -``` - -Or if you want only the information stored in the vertices. - -```json -arangosh> db._query( -...> "FOR b IN books " + -...> "LET authorsByBook = ( " + -...> " FOR author IN INBOUND b written " + -...> " OPTIONS { " + -...> " bfs: true, " + -...> " uniqueVertices: 'global' " + -...> " } " + -...> " RETURN author " + -...> ") " + -...> "RETURN { " + -...> " book: b, " + -...> " authors: authorsByBook " + -...> "} " -...> ).toArray(); -[ - { - "book" : { - "_key" : "2980088317", - "_id" : "books/2980088317", - "_rev" : "2980088317", - "title" : "The beauty of JOINS" - }, - "authors" : [ - { - "_key" : "2938210813", - "_id" : "authors/2938210813", - "_rev" : "2938210813", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - } - }, - { - "_key" : "2935261693", - "_id" : "authors/2935261693", - "_rev" : "2935261693", - "name" : { - "first" : "John", - "last" : "Doe" - } - } - ] - } -] -``` - -Or again embed the authors directly into the book document. 
- -```json -arangosh> db._query( -...> "FOR b IN books " + -...> "LET authors = ( " + -...> " FOR author IN INBOUND b written " + -...> " OPTIONS { " + -...> " bfs: true, " + -...> " uniqueVertices: 'global' " + -...> " } " + -...> " RETURN author " + -...> ") " + -...> "RETURN MERGE(b, {authors: authors}) " -...> ).toArray(); -[ - { - "_id" : "books/2980088317", - "_key" : "2980088317", - "_rev" : "2980088317", - "title" : "The beauty of JOINS", - "authors" : [ - { - "_key" : "2938210813", - "_id" : "authors/2938210813", - "_rev" : "2938210813", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - } - }, - { - "_key" : "2935261693", - "_id" : "authors/2935261693", - "_rev" : "2935261693", - "name" : { - "first" : "John", - "last" : "Doe" - } - } - ] - } -] -``` - -If you need the authors and their books, simply reverse the direction. - -```json -> db._query( -...> "FOR a IN authors " + -...> "LET booksByAuthor = ( " + -...> " FOR b IN OUTBOUND a written " + -...> " OPTIONS { " + -...> " bfs: true, " + -...> " uniqueVertices: 'global' " + -...> " } " + -...> " RETURN b" + -...> ") " + -...> "RETURN MERGE(a, {books: booksByAuthor}) " -...> ).toArray(); -[ - { - "_id" : "authors/2935261693", - "_key" : "2935261693", - "_rev" : "2935261693", - "name" : { - "first" : "John", - "last" : "Doe" - }, - "books" : [ - { - "_key" : "2980088317", - "_id" : "books/2980088317", - "_rev" : "2980088317", - "title" : "The beauty of JOINS" - } - ] - }, - { - "_id" : "authors/2938210813", - "_key" : "2938210813", - "_rev" : "2938210813", - "name" : { - "first" : "Maxima", - "last" : "Musterfrau" - }, - "books" : [ - { - "_key" : "2980088317", - "_id" : "books/2980088317", - "_rev" : "2980088317", - "title" : "The beauty of JOINS" - } - ] - } -] -``` - -**Authors**: [Frank Celler](https://github.com/fceller) - -**Tags**: #join #aql diff --git a/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md deleted file mode 100644 index f83d0d139151..000000000000 --- a/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md +++ /dev/null @@ -1,395 +0,0 @@ -Migrating anonymous graph Functions from 2.8 or earlier to 3.0 -============================================================== - -Problem -------- - -With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more -native integration of graph features into the query language. I have used the old graph -functions and want to upgrade to 3.0. - -Graph functions covered in this recipe: - -* EDGES -* NEIGHBORS -* PATHS -* TRAVERSAL -* TRAVERSAL_TREE - -Solution --------- - -### EDGES - -The EDGES can be simply replaced by a call to the AQL traversal. - -**No options** - -The syntax is slightly different but mapping should be simple: - -``` -// OLD -[..] FOR e IN EDGES(@@edgeCollection, @startId, 'outbound') RETURN e - -// NEW -[..] FOR v, e IN OUTBOUND @startId @@edgeCollection RETURN e -``` - -**Using EdgeExamples** - -Examples have to be transformed into AQL filter statements. -How to do this please read the GRAPH_VERTICES section -in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](MigratingGraphFunctionsTo3.md). -Apply these filters on the edge variable `e`. - -**Option incluceVertices** - -In order to include the vertices you just use the vertex variable v as well: - -``` -// OLD -[..] FOR e IN EDGES(@@edgeCollection, @startId, 'outbound', null, {includeVertices: true}) RETURN e - -// NEW -[..] 
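// v is the vertex reached by the traversal, e the edge that was followed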
FOR v, e IN OUTBOUND @startId @@edgeCollection RETURN {edge: e, vertex: v} -``` - -NOTE: The direction cannot be given as a bindParameter any more it has to be hard-coded in the query. - -### NEIGHBORS - -The NEIGHBORS is a breadth-first-search on the graph with a global unique check for vertices. So we can replace it by a an AQL traversal with these options. -Due to syntax changes the vertex collection of the start vertex is no longer mandatory to be given. -You may have to adjust bindParameteres for this query. - -**No options** - -The default options did just return the neighbors `_id` value. - -``` -// OLD -[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound') RETURN n - -// NEW -[..] FOR n IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n._id -``` - -NOTE: The direction cannot be given as a bindParameter any more it has to be hard-coded in the query. - -**Using edgeExamples** - -Examples have to be transformed into AQL filter statements. -How to do this please read the GRAPH_VERTICES section -in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](MigratingGraphFunctionsTo3.md). -Apply these filters on the edge variable `e` which is the second return variable of the traversal statement. - -However this is a bit more complicated as it interferes with the global uniqueness check. -For edgeExamples it is sufficent when any edge pointing to the neighbor matches the filter. Using `{uniqueVertices: 'global'}` first picks any edge randomly. Than it checks against this edge only. -If we know there are no vertex pairs with multiple edges between them we can use the simple variant which is save: - -``` -// OLD -[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', {label: 'friend'}) RETURN n - -// NEW -[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'} -FILTER e.label == 'friend' -RETURN n._id -``` - -If there may be multiple edges between the same pair of vertices we have to make the distinct check ourselfes and cannot rely on the traverser doing it correctly for us: - -``` -// OLD -[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', {label: 'friend'}) RETURN n - -// NEW -[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true} -FILTER e.label == 'friend' -RETURN DISTINCT n._id -``` - -**Option includeData** - -If you want to include the data simply return the complete document instead of only the `_id`value. - -``` -// OLD -[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', null, {includeData: true}) RETURN n - -// NEW -[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n -``` - -### PATHS - -This function computes all paths of the entire edge collection (with a given minDepth and maxDepth) as you can imagine this feature is extremely expensive and should never be used. -However paths can again be replaced by AQL traversal. - -**No options** -By default paths of length 0 to 10 are returned. And circles are not followed. 
- -``` -// OLD -RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound") - -// NEW -FOR start IN @@vertexCollection -FOR v, e, p IN 0..10 OUTBOUND start @@edgeCollection RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices} -``` - -**followCycles** - -If this option is set we have to modify the options of the traversal by modifying the `uniqueEdges` property: - -``` -// OLD -RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound", {followCycles: true}) - -// NEW -FOR start IN @@vertexCollection -FOR v, e, p IN 0..10 OUTBOUND start @@edgeCollection OPTIONS {uniqueEdges: 'none'} RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices} -``` - -**minDepth and maxDepth** - -If this option is set we have to give these parameters directly before the direction. - -``` -// OLD -RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound", {minDepth: 2, maxDepth: 5}) - -// NEW -FOR start IN @@vertexCollection -FOR v, e, p IN 2..5 OUTBOUND start @@edgeCollection -RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices} -``` - -### TRAVERSAL and TRAVERSAL_TREE - -These have been removed and should be replaced by the -[native AQL traversal](../../Manual/Graphs/Traversals/index.html). -There are many potential solutions using the new syntax, but they largely depend -on what exactly you are trying to achieve and would go beyond the scope of this -cookbook. Here is one example how to do the transition, using the -[world graph](../..//Manual/Graphs/index.html#the-world-graph) -as data: - -In 2.8, it was possible to use `TRAVERSAL()` together with a custom visitor -function to find leaf nodes in a graph. Leaf nodes are vertices that have inbound -edges, but no outbound edges. The visitor function code looked like this: - -```js -var aqlfunctions = require("org/arangodb/aql/functions"); - -aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) { - if (connected && connected.length === 0) { - return vertex.name + " (" + vertex.type + ")"; - } -}); -``` - -And the AQL query to make use of it: - -```js -LET params = { - order: "preorder-expander", - visitor: "myfunctions::leafNodeVisitor", - visitorReturnsResults: true -} -FOR result IN TRAVERSAL(worldVertices, worldEdges, "worldVertices/world", "inbound", params) - RETURN result -``` - -To traverse the graph starting at vertex `worldVertices/world` using native -AQL traversal and an anonymous graph, we can simply do: - -```js -FOR v IN 0..10 INBOUND "worldVertices/world" worldEdges - RETURN v -``` - -It will give us all vertex documents including the start vertex (because the -minimum depth is set to *0*). The maximum depth is set to *10*, which is enough -to follow all edges and reach the leaf nodes in this graph. - -The query can be modified to return a formatted path from first to last node: - -```js -FOR v, e, p IN 0..10 INBOUND "worldVertices/world" e - RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*].name) -``` - -The result looks like this (shortened): - -```json -[ - "World", - "World -> Africa", - "World -> Africa -> Cote d'Ivoire", - "World -> Africa -> Cote d'Ivoire -> Yamoussoukro", - "World -> Africa -> Angola", - "World -> Africa -> Angola -> Luanda", - "World -> Africa -> Chad", - "World -> Africa -> Chad -> N'Djamena", - ... -] -``` - -As we can see, all possible paths of varying lengths are returned. 
We are not -really interested in them, but we still have to do the traversal to go from -*World* all the way to the leaf nodes (e.g. *Yamoussoukro*). To determine -if a vertex is really the last on the path in the sense of being a leaf node, -we can use another traversal of depth 1 to check if there is at least one -outgoing edge - which means the vertex is not a leaf node, otherwise it is: - -```js -FOR v IN 0..10 INBOUND "worldVertices/world" worldEdges - FILTER LENGTH(FOR vv IN INBOUND v worldEdges LIMIT 1 RETURN 1) == 0 - RETURN CONCAT(v.name, " (", v.type, ")") -``` - -Using the current vertex `v` as starting point, the second traversal is -performed. It can return early after one edge was followed (`LIMIT 1`), -because we don't need to know the exact count and it is faster this way. -We also don't need the actual vertex, so we can just `RETURN 1` as dummy -value as an optimization. The traversal (which is a sub-query) will -return an empty array in case of a leaf node, and `[ 1 ]` otherwise. -Since we only want the leaf nodes, we `FILTER` out all non-empty arrays -and what is left are the leaf nodes only. The attributes `name` and -`type` are formatted the way they were like in the original JavaScript -code, but now with AQL. The final result is a list of all capitals: - -```json -[ - "Yamoussoukro (capital)", - "Luanda (capital)", - "N'Djamena (capital)", - "Algiers (capital)", - "Yaounde (capital)", - "Ouagadougou (capital)", - "Gaborone (capital)", - "Asmara (capital)", - "Cairo (capital)", - ... -] -``` - -There is no direct substitute for the `TRAVERSAL_TREE()` function. -The advantage of this function was that its (possibly highly nested) result -data structure inherently represented the "longest" possible paths only. -With native AQL traversal, all paths from minimum to maximum traversal depth -are returned, including the "short" paths as well: - -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges - RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key) -``` - -```json -[ - "continent-north-america <- country-antigua-and-barbuda", - "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s", - "continent-north-america <- country-barbados", - "continent-north-america <- country-barbados <- capital-bridgetown", - "continent-north-america <- country-canada", - "continent-north-america <- country-canada <- capital-ottawa", - "continent-north-america <- country-bahamas", - "continent-north-america <- country-bahamas <- capital-nassau" -] -``` - -A second traversal with depth = 1 can be used to check if we reached a leaf node -(no more incoming edges). Based on this information, the "short" paths can be -filtered out. Note that a second condition is required: it is possible that the -last node in a traversal is not a leaf node if the maximum traversal depth is -exceeded. Thus, we need to also let paths through, which contain as many edges -as hops we do in the traversal (here: 2). 
- -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges - LET other = ( - FOR vv, ee IN INBOUND v worldEdges - //FILTER ee != e // needed if traversing edges in ANY direction - LIMIT 1 - RETURN 1 - ) - FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2 - RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key) -``` - -```json -[ - "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s", - "continent-north-america <- country-barbados <- capital-bridgetown", - "continent-north-america <- country-canada <- capital-ottawa", - "continent-north-america <- country-bahamas <- capital-nassau" -] -``` - -The full paths can be returned, but it is not in a tree-like structure as -with `TRAVERSAL_TREE()`. Such a data structure can be constructed on -client-side if really needed. - -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges - LET other = (FOR vv, ee IN INBOUND v worldEdges LIMIT 1 RETURN 1) - FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2 - RETURN p -``` - -Path data (shortened): - -```json -[ - { - "edges": [ - { - "_id": "worldEdges/57585025", - "_from": "worldVertices/country-antigua-and-barbuda", - "_to": "worldVertices/continent-north-america", - "type": "is-in" - }, - { - "_id": "worldEdges/57585231", - "_from": "worldVertices/capital-saint-john-s", - "_to": "worldVertices/country-antigua-and-barbuda", - "type": "is-in" - } - ], - "vertices": [ - { - "_id": "worldVertices/continent-north-america", - "name": "North America", - "type": "continent" - }, - { - "_id": "worldVertices/country-antigua-and-barbuda", - "code": "ATG", - "name": "Antigua and Barbuda", - "type": "country" - }, - { - "_id": "worldVertices/capital-saint-john-s", - "name": "Saint John's", - "type": "capital" - } - ] - }, - { - ... - } -] -``` - -The first and second vertex of the nth path are connected by the first edge -(`p[n].vertices[0]` ⟝ `p[n].edges[0]` → `p[n].vertices[1]`) and so on. This -structure might actually be more convenient to process compared to a tree-like -structure. Note that the edge documents are also included, in constrast to the -removed graph traversal function. - -Contact us via our social channels if you need further help. - -**Author:** [Michael Hackstein](https://github.com/mchacki) - -**Tags**: #howto #aql #migration diff --git a/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md deleted file mode 100644 index f1dc3de0a19d..000000000000 --- a/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md +++ /dev/null @@ -1,777 +0,0 @@ -Migrating GRAPH_* Functions from 2.8 or earlier to 3.0 -====================================================== - -Problem -------- - -With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more -native integration of graph features into the query language. I have used the old graph -functions and want to upgrade to 3.0. - -Graph functions covered in this recipe: - -* GRAPH_COMMON_NEIGHBORS -* GRAPH_COMMON_PROPERTIES -* GRAPH_DISTANCE_TO -* GRAPH_EDGES -* GRAPH_NEIGHBORS -* GRAPH_TRAVERSAL -* GRAPH_TRAVERSAL_TREE -* GRAPH_SHORTEST_PATH -* GRAPH_PATHS -* GRAPH_VERTICES - -Solution 1: Quick and Dirty (not recommended) ---------------------------------------------- - -**When to use this solution** - -I am not willing to invest a lot if time into the upgrade process and I am -willing to surrender some performance in favor of less effort. 
-Some constellations may not work with this solution due to the nature of -user-defined functions. -Especially check for AQL queries that do both modifications -and `GRAPH_*` functions. - -**Registering user-defined functions** - -This step has to be executed once on ArangoDB for every database we are using. - -We connect to `arangodb` with `arangosh` to issue the following commands two: - -```js -var graphs = require("@arangodb/general-graph"); -graphs._registerCompatibilityFunctions(); -``` - -These have registered all old `GRAPH_*` functions as user-defined functions again, with the prefix `arangodb::`. - -**Modify the application code** - -Next we have to go through our application code and replace all calls to `GRAPH_*` by `arangodb::GRAPH_*`. -Perform a test run of the application and check if it worked. -If it worked we are ready to go. - -**Important Information** - -The user defined functions will call translated subqueries (as described in Solution 2). -The optimizer does not know anything about these subqueries beforehand and cannot optimize the whole plan. -Also there might be read/write constellations that are forbidden in user-defined functions, therefore -a "really" translated query may work while the user-defined function work around may be rejected. - -Solution 2: Translating the queries (recommended) -------------------------------------------------- - -**When to use this solution** - -I am willing to invest some time on my queries in order to get -maximum performance, full query optimization and a better -control of my queries. No forcing into the old layout -any more. - -**Before you start** - -If you are using `vertexExamples` which are not only `_id` strings do not skip -the GRAPH_VERTICES section, because it will describe how to translate them to -AQL. All graph functions using a vertexExample are identical to executing a -GRAPH_VERTICES before and using it's result as start point. -Example with NEIGHBORS: - -``` -FOR res IN GRAPH_NEIGHBORS(@graph, @myExample) RETURN res -``` - -Is identical to: - -``` -FOR start GRAPH_VERTICES(@graph, @myExample) - FOR res IN GRAPH_NEIGHBORS(@graph, start) RETURN res -``` - -All non GRAPH_VERTICES functions will only explain the transformation for a single input document's `_id`. - -**Options used everywhere** - -**Option edgeCollectionRestriction** - -In order to use edge Collection restriction we just use the feature that the traverser -can walk over a list of edge collections directly. So the edgeCollectionRestrictions -just form this list (exampleGraphEdges): - -``` -// OLD -[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {edgeCollectionRestriction: [edges1, edges2]}) RETURN e - -// NEW -[..] FOR v, e IN ANY @startId edges1, edges2 RETURN DISTINCT e._id -``` - -Note: The `@graphName` bindParameter is not used anymore and probably has to be removed from the query. - -**Option includeData** - -If we use the option includeData we simply return the object directly instead of only the _id - -Example GRAPH_EDGES: - -``` -// OLD -[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {includeData: true}) RETURN e - -// NEW -[..] FOR v, e IN ANY @startId GRAPH @graphName RETURN DISTINCT e -``` - -**Option direction** - -The direction has to be placed before the start id. -Note here: The direction has to be placed as Word it cannot be handed in via a bindParameter -anymore: - -``` -// OLD -[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {direction: 'inbound'}) RETURN e - -// NEW -[..] 
FOR v, e IN INBOUND @startId GRAPH @graphName RETURN DISTINCT e._id -``` - -**Options minDepth, maxDepth** - -If we use the options minDepth and maxDepth (both default 1 if not set) we can simply -put them in front of the direction part in the Traversal statement. - -Example GRAPH_EDGES: - -``` -// OLD -[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {minDepth: 2, maxDepth: 4}) RETURN e - -// NEW -[..] FOR v, e IN 2..4 ANY @startId GRAPH @graphName RETURN DISTINCT e._id -``` - -**Option maxIteration** - -The option `maxIterations` is removed without replacement. -Your queries are now bound by main memory not by an arbitrary number of iterations. - -### GRAPH_VERTICES - -First we have to branch on the example. -There we have three possibilities: - -1. The example is an `_id` string. -2. The example is `null` or `{}`. -3. The example is a non empty object or an array. - -**Example is '_id' string** - -This is the easiest replacement. In this case we simply replace the function with a call to `DOCUMENT`: - -``` -// OLD -[..] GRAPH_VERTICES(@graphName, @idString) [..] - -// NEW -[..] DOCUMENT(@idString) [..] -``` - -NOTE: The `@graphName` is not required anymore, we may have to adjust bindParameters. - -The AQL graph features can work with an id directly, no need to call `DOCUMENT` before if we just need this to find a starting point. - -**Example is `null` or the empty object** - -This case means we use all documents from the graph. -Here we first have to now the vertex collections of the graph. - -1. If we only have one collection (say `vertices`) we can replace it with a simple iteration over this collection: - -``` -// OLD -[..] FOR v IN GRAPH_VERTICES(@graphName, {}) [..] - -// NEW -[..] FOR v IN vertices [..] -```` - -NOTE: The `@graphName` is not required anymore, we may have to adjust bindParameters. - - -2. We have more than one collection. This is the unfortunate case for a general replacement. -So in the general replacement we assume we do not want to exclude any of the collections in -the graph. Than we unfortunately have to form a `UNION`over all these collections. -Say our graph has the vertex collections `vertices1`, `vertices2`, `vertices3` we create a sub-query for -a single collection for each of them and wrap them in a call to `UNION`. - -``` -// OLD -[..] FOR v IN GRAPH_VERTICES(@graphName, {}) [..] - -// NEW -[..] -FOR v IN UNION( // We start a UNION - (FOR v IN vertices1 RETURN v), // For each vertex collection - (FOR v IN vertices2 RETURN v), // we create the same subquery - (FOR v IN vertices3 RETURN v) -) // Finish with the UNION -[..] -```` - -NOTE: If you have any more domain knowledge of your graph apply it at this point to identify which -collections are actually relevant as this `UNION` is a rather expensive operation. - -If we use the option `vertexCollectionRestriction` in the original query. The `UNION` has to be formed -by the collections in this restriction instead of ALL collections. - -**Example is a non-empty object** - -First we follow the instructions for the empty object above. -In this section we will just focus on a single collection `vertices`, the UNION for multiple collections -is again wrapped around a subquery for each of these collections built in the following way. - -Now we have to transform the example into an AQL `FILTER` statement. -Therefore we take all top-level attributes of the example and do an equal comparison with their values. -All of these comparisons are joined with an `AND` because the all have to be fulfilled. 
- -Example: - -``` -// OLD -[..] FOR v IN GRAPH_VERTICES(@graphName, {foo: 'bar', the: {answer: 42}}}) [..] - -// NEW -[..] FOR v IN vertices - FILTER v.foo == 'bar' // foo: bar - AND v.the == {answer: 42} //the: {answer: 42} -[..] -``` - -**Example is an array** - -The idea transformation is almost identical to a single non-empty object. -For each element in the array we create the filter conditions and than we -`OR`-combine them (mind the brackets): - -``` -// OLD -[..] FOR v IN GRAPH_VERTICES(@graphName, [{foo: 'bar', the: {answer: 42}}, {foo: 'baz'}])) [..] - -// NEW -[..] FOR v IN vertices - FILTER (v.foo == 'bar' // foo: bar - AND v.the == {answer: 42}) //the: {answer: 42} - OR (v.foo == 'baz') -[..] -``` - -### GRAPH_EDGES - -The GRAPH_EDGES can be simply replaced by a call to the AQL traversal. - -**No options** - -The default options did use a direction `ANY` and returned a distinct result of the edges. -Also it did just return the edges `_id` value. - -``` -// OLD -[..] FOR e IN GRAPH_EDGES(@graphName, @startId) RETURN e - -// NEW -[..] FOR v, e IN ANY @startId GRAPH @graphName RETURN DISTINCT e._id -``` - -**Option edgeExamples.** - -See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the edge variable `e`. - -### GRAPH_NEIGHBORS - -The GRAPH_NEIGHBORS is a breadth-first-search on the graph with a global unique check for vertices. So we can replace it by a an AQL traversal with these options. - -**No options** - -The default options did use a direction `ANY` and returned a distinct result of the neighbors. -Also it did just return the neighbors `_id` value. - -``` -// OLD -[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId) RETURN n - -// NEW -[..] FOR n IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n -``` - -**Option neighborExamples** - -See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the neighbor variable `n`. - -**Option edgeExamples** - -See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the edge variable `e`. - -However this is a bit more complicated as it interferes with the global uniqueness check. -For edgeExamples it is sufficient when any edge pointing to the neighbor matches the filter. Using `{uniqueVertices: 'global'}` first picks any edge randomly. Than it checks against this edge only. -If we know there are no vertex pairs with multiple edges between them we can use the simple variant which is save: - -``` -// OLD -[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {edgeExample: {label: 'friend'}}) RETURN e - -// NEW -[..] FOR n, e IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: 'global'} FILTER e.label == 'friend' RETURN n._id -``` - -If there may be multiple edges between the same pair of vertices we have to make the distinct check ourselfes and cannot rely on the traverser doing it correctly for us: - -``` -// OLD -[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {edgeExample: {label: 'friend'}}) RETURN e - -// NEW -[..] FOR n, e IN ANY @startId GRAPH @graphName OPTIONS {bfs: true} FILTER e.label == 'friend' RETURN DISTINCT n._id -``` - -**Option vertexCollectionRestriction** - -If we use the vertexCollectionRestriction we have to postFilter the neighbors based on their collection. Therefore we can make use of the function `IS_SAME_COLLECTION`: - -``` -// OLD -[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {vertexCollectionRestriction: ['vertices1', 'vertices2']}) RETURN e - -// NEW -[..] 
FOR n IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: true} FILTER IS_SAME_COLLECTION(vertices1, n) OR IS_SAME_COLLECTION(vertices2, n) RETURN DISTINCT n._id -``` - -### GRAPH_COMMON_NEIGHBORS - -`GRAPH_COMMON_NEIGHBORS` is defined as two `GRAPH_NEIGHBORS` queries and than forming the `INTERSECTION` of both queries. -How to translate the options please refer to `GRAPH_NEIGHBORS`. -Finally we have to build the old result format `{left, right, neighbors}`. -If you just need parts of the result you can adapt this query to your specific needs. - -``` -// OLD -FOR v IN GRAPH_COMMON_NEIGHBORS(@graphName, 'vertices/1' , 'vertices/2', {direction : 'any'}) RETURN v - -// NEW -LET n1 = ( // Neighbors for vertex1Example - FOR n IN ANY 'vertices/1' GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id - ) -LET n2 = ( // Neighbors for vertex2Example - FOR n IN ANY 'vertices/2' GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id - ) -LET common = INTERSECTION(n1, n2) // Get the intersection -RETURN { // Produce the original result - left: 'vertices/1', - right: 'vertices/2, - neighbors: common -} -``` - -NOTE: If you are using examples instead of `_ids` you have to add a filter to make sure that the left is not equal to the right start vertex. -To give you an example with a single vertex collection `vertices`, the replacement would look like this: - -``` -// OLD -FOR v IN GRAPH_COMMON_NEIGHBORS(@graphName, {name: "Alice"}, {name: "Bob"}) RETURN v - -// NEW -FOR left IN vertices - FILTER left.name == "Alice" - LET n1 = (FOR n IN ANY left GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id) - FOR right IN vertices - FILTER right.name == "Bob" - FILTER right != left // Make sure left is not identical to right - LET n2 = (FOR n IN ANY right GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id) - LET neighbors = INTERSECTION(n1, n2) - FILTER LENGTH(neighbors) > 0 // Only pairs with shared neighbors should be returned - RETURN {left: left._id, right: right._id, neighbors: neighbors} -``` - -### GRAPH_PATHS - -This function computes all paths of the entire graph (with a given minDepth and maxDepth) as you can imagine this feature is extremely expensive and should never be used. -However paths can again be replaced by AQL traversal. -Assume we only have one vertex collection `vertices` again. - -**No options** -By default paths of length 0 to 10 are returned. And circles are not followed. - -``` -// OLD -RETURN GRAPH_PATHS('graph') - -// NEW -FOR start IN vertices -FOR v, e, p IN 0..10 OUTBOUND start GRAPH 'graph' RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices} -``` - -**followCycles** - -If this option is set we have to modify the options of the traversal by modifying the `uniqueEdges` property: - -``` -// OLD -RETURN GRAPH_PATHS('graph', {followCycles: true}) - -// NEW -FOR start IN vertices -FOR v, e, p IN 0..10 OUTBOUND start GRAPH 'graph' OPTIONS {uniqueEdges: 'none'} RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices} -``` - -### GRAPH_COMMON_PROPERTIES - -This feature involves several full-collection scans and therefore is extremely expensive. -If you really need it you can transform it with the help of `ATTRIBUTES`, `KEEP` and `ZIP`. 
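As a quick reminder of what these three helpers do, here is a minimal sketch using an inline example document (attribute order in the results may vary):

```
LET doc = { "_id": "vertices/1", "answer": 42, "foo": "bar" }
RETURN {
  names:  ATTRIBUTES(doc, true),       // attribute names without system attributes: [ "answer", "foo" ]
  kept:   KEEP(doc, [ "answer" ]),     // keep only the listed attributes: { "answer": 42 }
  zipped: ZIP([ "a", "b" ], [ 1, 2 ])  // build an object from keys and values: { "a": 1, "b": 2 }
}
```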
- -**Start with single _id** - -``` -// OLD -RETURN GRAPH_COMMON_PROPERTIES('graph', "vertices/1", "vertices/2") - -// NEW -LET left = DOCUMENT("vertices/1") // get one document -LET right = DOCUMENT("vertices/2") // get the other one -LET shared = (FOR a IN ATTRIBUTES(left) // find all shared attributes - FILTER left[a] == right[a] - OR a == '_id' // always include _id - RETURN a) -FILTER LENGTH(shared) > 1 // Return them only if they share an attribute -RETURN ZIP([left._id], [KEEP(right, shared)]) // Build the result -``` - -**Start with vertexExamples** - -Again we assume we only have a single collection `vertices`. -We have to transform the examples into filters. Iterate -over vertices to find all left documents. -For each left document iterate over vertices again -to find matching right documents. -And return the shared attributes as above: - -``` -// OLD -RETURN GRAPH_COMMON_PROPERTIES('graph', {answer: 42}, {foo: "bar"}) - -// NEW -FOR left IN vertices - FILTER left.answer == 42 - LET commons = ( - FOR right IN vertices - FILTER right.foo == "bar" - FILTER left != right - LET shared = (FOR a IN ATTRIBUTES(left) - FILTER left[a] == right[a] - OR a == '_id' - RETURN a) - FILTER LENGTH(shared) > 1 - RETURN KEEP(right, shared)) - FILTER LENGTH(commons) > 0 - RETURN ZIP([left._id], [commons]) -``` - - -### GRAPH_SHORTEST_PATH - -A shortest path computation is now done via the new SHORTEST_PATH AQL statement. - -**No options** - -``` -// OLD -FOR p IN GRAPH_SHORTEST_PATH(@graphName, @startId, @targetId, {direction : 'outbound'}) RETURN p - -// NEW -LET p = ( // Run one shortest Path - FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName - // We return objects with vertex, edge and weight for each vertex on the path - RETURN {vertex: v, edge: e, weight: (IS_NULL(e) ? 0 : 1)} -) -FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist -RETURN { // We rebuild the old format - vertices: p[*].vertex, - edges: p[* FILTER CURRENT.e != null].edge, - distance: SUM(p[*].weight) -} -``` - -**Options weight and defaultWeight** - -The new AQL SHORTEST_PATH offers the options `weightAttribute` and `defaultWeight`. - -``` -// OLD -FOR p IN GRAPH_SHORTEST_PATH(@graphName, @startId, @targetId, {direction : 'outbound', weight: "weight", defaultWeight: 80}) RETURN p - -// NEW -LET p = ( // Run one shortest Path - FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName - // We return objects with vertex, edge and weight for each vertex on the path - RETURN {vertex: v, edge: e, weight: (IS_NULL(e) ? 0 : (IS_NUMBER(e.weight) ? e.weight : 80))} -) -FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist -RETURN { // We rebuild the old format - vertices: p[*].vertex, - edges: p[* FILTER CURRENT.e != null].edge, - distance: SUM(p[*].weight) // We have to recompute the distance if we need it -} -``` - - -### GRAPH_DISTANCE_TO - -Graph distance to only differs by the result format from `GRAPH_SHORTEST_PATH`. -So we follow the transformation for `GRAPH_SHORTEST_PATH`, remove some unnecessary parts, -and change the return format - -``` -// OLD -FOR p IN GRAPH_DISTANCE_TO(@graphName, @startId, @targetId, {direction : 'outbound'}) RETURN p - -// NEW -LET p = ( // Run one shortest Path - FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName - // DIFFERENCE we only return the weight for each edge on the path - RETURN IS_NULL(e) ? 
0 : 1} -) -FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist -RETURN { // We rebuild the old format - startVertex: @startId, - vertex: @targetId, - distance: SUM(p[*].weight) -} -``` - -### GRAPH_TRAVERSAL and GRAPH_TRAVERSAL_TREE - -These have been removed and should be replaced by the -[native AQL traversal](../../Manual/Graphs/Traversals/index.html). -There are many potential solutions using the new syntax, but they largely depend -on what exactly you are trying to achieve and would go beyond the scope of this -cookbook. Here is one example how to do the transition, using the -[world graph](../../Manual/Graphs/index.html#the-world-graph) -as data: - -In 2.8, it was possible to use `GRAPH_TRAVERSAL()` together with a custom visitor -function to find leaf nodes in a graph. Leaf nodes are vertices that have inbound -edges, but no outbound edges. The visitor function code looked like this: - -```js -var aqlfunctions = require("org/arangodb/aql/functions"); - -aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) { - if (connected && connected.length === 0) { - return vertex.name + " (" + vertex.type + ")"; - } -}); -``` - -And the AQL query to make use of it: - -```js -LET params = { - order: "preorder-expander", - visitor: "myfunctions::leafNodeVisitor", - visitorReturnsResults: true -} -FOR result IN GRAPH_TRAVERSAL("worldCountry", "worldVertices/world", "inbound", params) - RETURN result -``` - -To traverse the graph starting at vertex `worldVertices/world` using native -AQL traversal and a named graph, we can simply do: - -```js -FOR v IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry" - RETURN v -``` - -It will give us all vertex documents including the start vertex (because the -minimum depth is set to *0*). The maximum depth is set to *10*, which is enough -to follow all edges and reach the leaf nodes in this graph. - -The query can be modified to return a formatted path from first to last node: - -```js -FOR v, e, p IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry" - RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*].name) -``` - -The result looks like this (shortened): - -```json -[ - "World", - "World -> Africa", - "World -> Africa -> Cote d'Ivoire", - "World -> Africa -> Cote d'Ivoire -> Yamoussoukro", - "World -> Africa -> Angola", - "World -> Africa -> Angola -> Luanda", - "World -> Africa -> Chad", - "World -> Africa -> Chad -> N'Djamena", - ... -] -``` - -As we can see, all possible paths of varying lengths are returned. We are not -really interested in them, but we still have to do the traversal to go from -*World* all the way to the leaf nodes (e.g. *Yamoussoukro*). To determine -if a vertex is really the last on the path in the sense of being a leaf node, -we can use another traversal of depth 1 to check if there is at least one -outgoing edge - which means the vertex is not a leaf node, otherwise it is: - -```js -FOR v IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry" - FILTER LENGTH(FOR vv IN INBOUND v GRAPH "worldCountry" LIMIT 1 RETURN 1) == 0 - RETURN CONCAT(v.name, " (", v.type, ")") -``` - -Using the current vertex `v` as starting point, the second traversal is -performed. It can return early after one edge was followed (`LIMIT 1`), -because we don't need to know the exact count and it is faster this way. -We also don't need the actual vertex, so we can just `RETURN 1` as dummy -value as an optimization. 
The traversal (which is a sub-query) will -return an empty array in case of a leaf node, and `[ 1 ]` otherwise. -Since we only want the leaf nodes, we `FILTER` out all non-empty arrays -and what is left are the leaf nodes only. The attributes `name` and -`type` are formatted the way they were like in the original JavaScript -code, but now with AQL. The final result is a list of all capitals: - -```json -[ - "Yamoussoukro (capital)", - "Luanda (capital)", - "N'Djamena (capital)", - "Algiers (capital)", - "Yaounde (capital)", - "Ouagadougou (capital)", - "Gaborone (capital)", - "Asmara (capital)", - "Cairo (capital)", - ... -] -``` - -There is no direct substitute for the `GRAPH_TRAVERSAL_TREE()` function. -The advantage of this function was that its (possibly highly nested) result -data structure inherently represented the "longest" possible paths only. -With native AQL traversal, all paths from minimum to maximum traversal depth -are returned, including the "short" paths as well: - -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry" - RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key) -``` - -```json -[ - "continent-north-america <- country-antigua-and-barbuda", - "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s", - "continent-north-america <- country-barbados", - "continent-north-america <- country-barbados <- capital-bridgetown", - "continent-north-america <- country-canada", - "continent-north-america <- country-canada <- capital-ottawa", - "continent-north-america <- country-bahamas", - "continent-north-america <- country-bahamas <- capital-nassau" -] -``` - -A second traversal with depth = 1 can be used to check if we reached a leaf node -(no more incoming edges). Based on this information, the "short" paths can be -filtered out. Note that a second condition is required: it is possible that the -last node in a traversal is not a leaf node if the maximum traversal depth is -exceeded. Thus, we need to also let paths through, which contain as many edges -as hops we do in the traversal (here: 2). - -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry" - LET other = ( - FOR vv, ee IN INBOUND v GRAPH "worldCountry" - //FILTER ee != e // needed if traversing edges in ANY direction - LIMIT 1 - RETURN 1 - ) - FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2 - RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key) -``` - -```json -[ - "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s", - "continent-north-america <- country-barbados <- capital-bridgetown", - "continent-north-america <- country-canada <- capital-ottawa", - "continent-north-america <- country-bahamas <- capital-nassau" -] -``` - -The full paths can be returned, but it is not in a tree-like structure as -with `GRAPH_TRAVERSAL_TREE()`. Such a data structure can be constructed on -client-side if really needed. 
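-
-For illustration, a minimal client-side helper (plain JavaScript, e.g. for arangosh or Node.js)
-that folds the paths returned by the query below into a nested tree could look like this; it
-assumes each path's `vertices` array starts at the traversal's start vertex:
-
-```js
-// Sketch: build a nested tree from the flat list of paths.
-// Each path is expected to look like {vertices: [...], edges: [...]}.
-function buildTree(paths) {
-  var nodesById = {};
-  var root = null;
-
-  paths.forEach(function (path) {
-    path.vertices.forEach(function (vertex, i) {
-      if (!nodesById[vertex._id]) {
-        nodesById[vertex._id] = {vertex: vertex, children: []};
-      }
-      var node = nodesById[vertex._id];
-      if (i === 0) {
-        root = node; // the start vertex becomes the root
-      } else {
-        var parent = nodesById[path.vertices[i - 1]._id];
-        if (parent.children.indexOf(node) === -1) {
-          parent.children.push(node);
-        }
-      }
-    });
-  });
-
-  return root;
-}
-```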
- -```js -FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry" - LET other = (FOR vv, ee IN INBOUND v GRAPH "worldCountry" LIMIT 1 RETURN 1) - FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2 - RETURN p -``` - -Path data (shortened): - -```json -[ - { - "edges": [ - { - "_id": "worldEdges/57585025", - "_from": "worldVertices/country-antigua-and-barbuda", - "_to": "worldVertices/continent-north-america", - "type": "is-in" - }, - { - "_id": "worldEdges/57585231", - "_from": "worldVertices/capital-saint-john-s", - "_to": "worldVertices/country-antigua-and-barbuda", - "type": "is-in" - } - ], - "vertices": [ - { - "_id": "worldVertices/continent-north-america", - "name": "North America", - "type": "continent" - }, - { - "_id": "worldVertices/country-antigua-and-barbuda", - "code": "ATG", - "name": "Antigua and Barbuda", - "type": "country" - }, - { - "_id": "worldVertices/capital-saint-john-s", - "name": "Saint John's", - "type": "capital" - } - ] - }, - { - ... - } -] -``` - -The first and second vertex of the nth path are connected by the first edge -(`p[n].vertices[0]` ⟝ `p[n].edges[0]` → `p[n].vertices[1]`) and so on. This -structure might actually be more convenient to process compared to a tree-like -structure. Note that the edge documents are also included, in contrast to the -removed graph traversal function. - -Contact us via our social channels if you need further help. - -**Author:** [Michael Hackstein](https://github.com/mchacki) - -**Tags**: #howto #aql #migration diff --git a/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md deleted file mode 100644 index 1684683892a9..000000000000 --- a/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md +++ /dev/null @@ -1,98 +0,0 @@ -Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0 -========================================================= - -Problem -------- - -With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more -native integration of graph features into the query language. I have used the old graph -functions and want to upgrade to 3.0. - -Graph functions covered in this recipe: - -* GRAPH_ABSOLUTE_BETWEENNESS -* GRAPH_ABSOLUTE_CLOSENESS -* GRAPH_ABSOLUTE_ECCENTRICITY -* GRAPH_BETWEENNESS -* GRAPH_CLOSENESS -* GRAPH_DIAMETER -* GRAPH_ECCENTRICITY -* GRAPH_RADIUS - -Solution 1: User Defined Funtions ---------------------------------- - -### Registering user-defined functions - -This step has to be executed once on ArangoDB for every database we are using. - -We connect to `arangodb` with `arangosh` to issue the following commands two: - -```js -var graphs = require("@arangodb/general-graph"); -graphs._registerCompatibilityFunctions(); -``` - -These have registered all old `GRAPH_*` functions as user-defined functions again, with the prefix `arangodb::`. - -### Modify the application code - -Next we have to go through our application code and replace all calls to `GRAPH_*` by `arangodb::GRAPH_*`. -Now run a testrun of our application and check if it worked. -If it worked we are ready to go. - -### Important Information - -The user defined functions will call translated subqueries (as described in Solution 2). -The optimizer does not know anything about these subqueries beforehand and cannot optimize the whole plan. 
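-
-For reference, after registration such a call looks like any other user-defined AQL function
-invocation; the graph name below is made up for illustration:
-
-```
-// 2.8
-RETURN GRAPH_RADIUS('myGraph')
-
-// 3.0, with the compatibility functions registered
-RETURN arangodb::GRAPH_RADIUS('myGraph')
-```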
-
-Also, there might be read/write constellations that are forbidden in user-defined functions, so
-a "really" translated query may work while the user-defined function workaround may be rejected.
-
-Solution 2: Foxx (recommended)
-------------------------------
-
-The general graph module still offers the measurement functions.
-As these are typically computationally expensive and create long-running queries, it is recommended
-not to use them in combination with other AQL features.
-Therefore, the best idea is to offer these measurements directly via an API using Foxx.
-
-First we create a new [Foxx service](../../Manual/Foxx/index.html).
-Then we include the `general-graph` module in the service.
-For every measurement we need, we simply offer a GET route to read it.
-
-As an example, we do the equivalent of `GRAPH_RADIUS`:
-
-```
-/// ADD FOXX CODE ABOVE
-
-const joi = require('joi');
-const createRouter = require('@arangodb/foxx/router');
-const dd = require('dedent');
-const router = createRouter();
-
-const graphs = require("@arangodb/general-graph");
-
-router.get('/radius/:graph', function(req, res) {
-  let graph;
-
-  // Load the graph
-  try {
-    graph = graphs._graph(req.pathParams.graph);
-  } catch (e) {
-    res.throw('not found');
-  }
-  res.json(graph._radius()); // Return the radius
-})
-.pathParam('graph', joi.string().required(), 'The name of the graph')
-.error('not found', 'Graph with this name does not exist.')
-.summary('Compute the Radius')
-.description(dd`
-  This function computes the radius of the given graph
-  and returns it.
-`);
-```
-
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags**: #howto #aql #migration
diff --git a/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md b/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md
deleted file mode 100644
index 401c925e9303..000000000000
--- a/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md
+++ /dev/null
@@ -1,171 +0,0 @@
-Writing multi-line AQL queries
-==============================
-
-Problem
--------
-
-I want to write an AQL query that spans multiple lines in my JavaScript source code,
-but it does not work. How to do this?
-
-Solution
---------
-
-AQL supports multi-line queries, and the AQL editor in ArangoDB's web interface supports
-them too.
-
-When issued programmatically, multi-line queries can be a source of errors, at least in
-some languages. For example, JavaScript is notoriously bad at handling multi-line (JavaScript)
-statements, and until recently it had no support for multi-line strings.
-
-In JavaScript, there are three ways of writing a multi-line AQL query in the source code:
-
-- string concatenation
-- ES6 template strings
-- query builder
-
-Which method works best depends on a few factors, but is often simply a matter of preference.
-Before deciding on any, please make sure to read the recipe for [avoiding parameter injection](AvoidingInjection.md)
-too.
-
-### String concatenation
-
-We want the query `FOR doc IN collection FILTER doc.value == @what RETURN doc` to become
-more legible in the source code.
-
-Simply splitting the query string into three lines will leave us with a parse error in
-JavaScript:
-
-```js
-/* will not work */
-var query = "FOR doc IN collection
-  FILTER doc.value == @what
-  RETURN doc";
-```
-
-Instead, we could do this:
-
-```js
-var query = "FOR doc IN collection " +
-            "FILTER doc.value == @what " +
-            "RETURN doc";
-```
-
-This is perfectly valid JavaScript, but it's error-prone.
People have spent ages on finding -subtle bugs in their queries because they missed a single whitespace character at the -beginning or start of some line. - -Please note that when assembling queries via string concatenation, you should still use -bind parameters (as done above with `@what`) and not insert user input values into the -query string without sanitation. - -### ES6 template strings - -ES6 template strings are easier to get right and also look more elegant. They can be used -inside ArangoDB since version 2.5. but some other platforms don't support them et. -For example, they can't be used in IE and older node.js versions. So use them if your -environment supports them and your code does not need to run on any non-ES6 environments. - -Here's the query string declared via an ES6 template string (note that the string must -be enclosed in backticks now): - -```js -var query = `FOR doc IN collection - FILTER doc.value == @what - RETURN doc`; -``` -The whitespace in the template string-variant is much easier to get right than when doing -the string concatenation. - -There are a few things to note regarding template strings: - -- ES6 template strings can be used to inject JavaScript values into the string dynamically. - Substitutions start with the character sequence `${`. Care must be taken if this sequence - itself is used inside the AQL query string (currently this would be invalid AQL, but this - may change in future ArangoDB versions). Additionally, any values injected into the query - string using parameter substitutions will not be escaped correctly automatically, so again - special care must be taken when using this method to keep queries safe from parameter - injection. - -- a multi-line template string will actually contain newline characters. This is not necessarily - the case when doing string concatenation. In the string concatenation example, we used - three lines of source code to create a single-line query string. We could have inserted - newlines into the query string there too, but we didn't. Just to point out that the two - variants will not create bytewise-identical query strings. - -Please note that when using ES6 template strings for your queries, you should still use -bind parameters (as done above with `@what`) and not insert user input values into the -query string without sanitation. - -There is a convenience function `aql` which can be used to safely -and easily build an AQL query with substitutions from arbitrary JavaScript values and -expressions. It can be invoked like this: - -```js -const aql = require("@arangodb").aql; // not needed in arangosh - -var what = "some input value"; -var query = aql`FOR doc IN collection - FILTER doc.value == ${what} - RETURN doc`; -``` - -The template string variant that uses `aql` is both convenient and safe. Internally, it -will turn the substituted values into bind parameters. The query string and the bind parameter -values will be returned separately, so the result of `query` above will be something like: - -```js -{ - "query" : "FOR doc IN collection FILTER doc.value == @value0 RETURN doc", - "bindVars" : { - "value0" : "some input value" - } -} -``` - -### Query builder - -ArangoDB comes bundled with a query builder named [aqb](https://www.npmjs.com/package/aqb). -That query builder can be used to programmatically construct AQL queries, without having -to write query strings at all. 
- -Here's an example of its usage: - -```js -var qb = require("aqb"); - -var jobs = db._createStatement({ - query: ( - qb.for('job').in('_jobs') - .filter( - qb('pending').eq('job.status') - .and(qb.ref('@queue').eq('job.queue')) - .and(qb.ref('@now').gte('job.delayUntil')) - ) - .sort('job.delayUntil', 'ASC') - .limit('@max') - .return('job') - ), - bindVars: { - queue: queue._key, - now: Date.now(), - max: queue.maxWorkers - numBusy - } -}).execute().toArray(); -``` - -As can be seen, aqb provides a fluent API that allows chaining function calls for -creating the individual query operations. This has a few advantages: - -- flexibility: there is no query string in the source code, so the code can be formatted - as desired without having to bother about strings -- validation: the query can be validated syntactically by aqb before being actually executed - by the server. Testing of queries also becomes easier. Additionally, some IDEs may - provide auto-completion to some extend and thus aid development -- security: built-in separation of query operations (e.g. `FOR`, `FILTER`, `SORT`, `LIMIT`) - and dynamic values (e.g. user input values) - -aqb can be used inside ArangoDB and from node.js and even from within browsers. - -**Authors**: [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #aql #aqb #es6 diff --git a/Documentation/Books/Cookbook/AQL/README.md b/Documentation/Books/Cookbook/AQL/README.md deleted file mode 100644 index abe384e2869c..000000000000 --- a/Documentation/Books/Cookbook/AQL/README.md +++ /dev/null @@ -1,27 +0,0 @@ -AQL -=== - -Using AQL in general --------------------- - -- [Using Joins in AQL](Joins.md) - -- [Using Dynamic Attribute Names](DynamicAttributeNames.md) - -- [Creating Test-data using AQL](CreatingTestData.md) - -- [Diffing Documents](DiffingDocuments.md) - -- [Avoiding Parameter Injection](AvoidingInjection.md) - -- [Multiline Query Strings](Joins.md) - - -Migrating from 2.x to 3.0 -------------------------- - -- [Migrating named graph functions to 3.0](MigratingGraphFunctionsTo3.md) - -- [Migrating anonymous graph functions to 3.0](MigratingEdgeFunctionsTo3.md) - -- [Migrating graph measurements to 3.0](MigratingMeasurementsTo3.md) diff --git a/Documentation/Books/Cookbook/AccessingShapesData.md b/Documentation/Books/Cookbook/AccessingShapesData.md deleted file mode 100644 index cde23f068500..000000000000 --- a/Documentation/Books/Cookbook/AccessingShapesData.md +++ /dev/null @@ -1,120 +0,0 @@ -# Accessing Shapes Data - -## Problem -Documents in a collection may have different shapes associated with them. There is no way to query the shapes data directly. So how do you solve this problem? - -## Solution -There are two possible ways to do this. - -*A) The fast way with some random samplings:* - -1. Ask for a random document (`db..any()`) and note its top-level attribute names -2. Repeat this for at least 10 times. After that repeat it only if you think it's worth it. 
- -Following is an example of an implementation: - -```js -attributes(db.myCollection); - - -function attributes(collection) { - "use strict" - - var probes = 10; - var maxRounds = 3; - var threshold = 0.5; - - var maxDocuments = collection.count(); - - if (maxDocuments < probes) { - probes = maxDocuments; - } - - if (probes === 0) { - return [ ]; - } - - var attributes = { }; - - while (maxRounds--) { - var newDocuments = 0; - var n = probes; - while (n--) { - var doc = collection.any(); - var found = false; - var keys = Object.keys(doc); - - for (var i = 0; i < keys.length; ++i) { - if (attributes.hasOwnProperty(keys[i])) { - ++attributes[keys[i]]; - } - else { - attributes[keys[i]] = 1; - found = true; - } - } - - if (found) { - ++newDocuments; - } - } - - if (newDocuments / probes <= threshold) { - break; - } - } - - return Object.keys(attributes); -} -``` - -*B) The way to find all top-level attributes* - -If you don't mind to make some extra inserts and you don't care about deletion or updates of documents you can use the following: - -```js -db._create("mykeys"); -db.mykeys.ensureUniqueSkiplist("attribute"); - - -function insert(collection, document) { - var result = collection.save(document); - - try { - var keys = Objects.keys(document); - - for (i = 0; i < keys.length; ++i) { - try { - db.mykeys.save({ attribute: keys[i] }); - } - catch (err1) { - // potential unique key constraint violations - } - } - } - catch (err2) { - } - - return result; -} -``` - -## Comment - -*A) The fast way with some random samplings:* - -You get some random sampling with bounded complexity. -If you have a variety of attributes you should repeat the procedure more than 10 times. - -The procedure can be implemented as a server side action. - -*B) The way to find all top-level attributes*: - -This procedure will not care about updates or deletions of documents. -Also only the top-level attribute of the documents will be inserted and nested one ignored. - -The procedure can be implemented as a server side action. - -**Author:** [Arangodb](https://github.com/arangodb) - -**Tags:** #collection #database \ No newline at end of file diff --git a/Documentation/Books/Cookbook/Administration/Authentication.md b/Documentation/Books/Cookbook/Administration/Authentication.md deleted file mode 100644 index 39c21c0e78e6..000000000000 --- a/Documentation/Books/Cookbook/Administration/Authentication.md +++ /dev/null @@ -1,114 +0,0 @@ -Using authentication -==================== - -Problem -------- - -I want to use authentication in ArangoDB. - -Solution --------- - -In order to make authentication work properly, you will need to create user accounts first. - -Then adjust ArangoDB's configuration and turn on authentication (if it's off). - -### Set up or adjust user accounts - -ArangoDB user accounts are valid throughout a server instance and users can be granted -access to one or more databases. They are managed through the database named `_system`. - -To manage user accounts, connect with the ArangoShell to the ArangoDB host and the -`_system` database: - -``` -$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system" -``` - -By default, arangosh will connect with a username `root` and an empty password. This -will work if authentication is turned off. - -When connected, you can create a new user account with the following command: - -``` -arangosh> require("org/arangodb/users").save("myuser", "mypasswd"); -``` - -`myuser` will be the username and `mypasswd` will be the user's password. 
Note that running -the command like this may store the password literally in ArangoShell's history. - -To avoid that, use a dynamically created password, e.g.: - -``` -arangosh> passwd = require("internal").genRandomAlphaNumbers(20); -arangosh> require("org/arangodb/users").save("myuser", passwd); -``` - -The above will print the password on screen (so you can memorize it) but won't store -it in the command history. - -While there, you probably want to change the password of the default `root` user too. -Otherwise one will be able to connect with the default `root` user and its -empty password. The following commands change the `root` user's password: - -``` -arangosh> passwd = require("internal").genRandomAlphaNumbers(20); -arangosh> require("org/arangodb/users").update("root", passwd); -``` - -### Turn on authentication - -Authentication is turned on by default in ArangoDB. You should make sure that it was -not turned off manually however. Check the configuration file (normally named -`/etc/arangodb.conf`) and make sure it contains the following line in the `server` section: - -``` -authentication = true -``` - -This will make ArangoDB require authentication for every request (including requests to -Foxx apps). - -If you want to run Foxx apps without HTTP authentcation, but activate HTTP authentication -for the built-in server APIs, you can add the following line in the `server` section of -the configuration: - -``` -authentication-system-only = true -``` - -The above will bypass authentication for requests to Foxx apps. - -When finished making changes, you need to restart ArangoDB: - -``` -service arangodb restart -``` - -### Check accessibility - -To confirm authentication is in effect, try connecting to ArangoDB with the ArangoShell: - -``` -$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system" -``` - -The above will implicity use a username `root` and an empty password when connecting. If -you changed the password of the `root` account as described above, this should not work anymore. - -You should also validate that you can connect with a valid user: - -``` -$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system" --server.username myuser -``` - -You can also use curl to check that you are actually getting HTTP 401 (Unauthorized) server -responses for requests that require authentication: - -``` -$ curl --dump - http://127.0.0.1:8529/_api/version -``` - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #authentication #security diff --git a/Documentation/Books/Cookbook/Administration/ImportingData.md b/Documentation/Books/Cookbook/Administration/ImportingData.md deleted file mode 100644 index fa7c0588f7ef..000000000000 --- a/Documentation/Books/Cookbook/Administration/ImportingData.md +++ /dev/null @@ -1,200 +0,0 @@ -Importing data -============== - -Problem -------- - -I want to import data from a file into ArangoDB. - -Solution --------- - -ArangoDB comes with a command-line tool utility named `arangoimport`. This utility can be -used for importing JSON-encoded, CSV, and tab-separated files into ArangoDB. - -`arangoimport` needs to be invoked from the command-line once for each import file. -The target collection can already exist or can be created by the import run. - -### Importing JSON-encoded data - -#### Input formats - -There are two supported input formats for importing JSON-encoded data into ArangoDB: - -- **line-by-line format**: This format expects each line in the input file to be a valid - JSON objects. 
No line breaks must occur within each single JSON object - -- **array format**: Expects a file containing a single array of JSON objects. Whitespace is - allowed for formatting inside the JSON array and the JSON objects - -Here's an example for the **line-by-line format** looks like this: - -```js -{"author":"Frank Celler","time":"2011-10-26 08:42:49 +0200","sha":"c413859392a45873936cbe40797970f8eed93ff9","message":"first commit","user":"f.celler"} -{"author":"Frank Celler","time":"2011-10-26 21:32:36 +0200","sha":"10bb77b8cc839201ff59a778f0c740994083c96e","message":"initial release","user":"f.celler"} -... -``` - -Here's an example for the same data in **array format**: - -```js -[ - { - "author": "Frank Celler", - "time": "2011-10-26 08:42:49 +0200", - "sha": "c413859392a45873936cbe40797970f8eed93ff9", - "message": "first commit", - "user": "f.celler" - }, - { - "author": "Frank Celler", - "time": "2011-10-26 21:32:36 +0200", - "sha": "10bb77b8cc839201ff59a778f0c740994083c96e", - "message": "initial release", - "user": "f.celler" - }, - ... -] -``` - -#### Importing JSON data in line-by-line format - -An example data file in **line-by-line format** can be downloaded -[here](http://jsteemann.github.io/downloads/code/git-commits-single-line.json). The example -file contains all the commits to the ArangoDB repository as shown by `git log --reverse`. - -The following commands will import the data from the file into a collection named `commits`: - -```bash -# download file -wget http://jsteemann.github.io/downloads/code/git-commits-single-line.json - -# actually import data -arangoimport --file git-commits-single-line.json --collection commits --create-collection true -``` - -Note that no file type has been specified when `arangoimport` was invoked. This is because `json` -is its default input format. - -The other parameters used have the following meanings: - -- `file`: input filename -- `collection`: name of the target collection -- `create-collection`: whether or not the collection should be created if it does not exist - -The result of the import printed by `arangoimport` should be: - -``` -created: 20039 -warnings/errors: 0 -total: 20039 -``` - -The collection `commits` should now contain the example commit data as present in the input file. - -#### Importing JSON data in array format - -An example input file for the **array format** can be found [here](http://jsteemann.github.io/downloads/code/git-commits-array.json). - -The command for importing JSON data in **array format** is similar to what we've done before: - -```bash -# download file -wget http://jsteemann.github.io/downloads/code/git-commits-array.json - -# actually import data -arangoimport --file git-commits-array.json --collection commits --create-collection true -``` - -Though the import command is the same (except the filename), there is a notable difference between the -two JSON formats: for the **array format**, `arangoimport` will read and parse the JSON in its entirety -before it sends any data to the ArangoDB server. That means the whole input file must fit into -`arangoimport`'s buffer. By default, `arangoimport` will allocate a 16 MiB internal buffer, and input files bigger -than that will be rejected with the following message: - -``` -import file is too big. please increase the value of --batch-size (currently 16777216). -``` - -So for JSON input files in **array format** it might be necessary to increase the value of `--batch-size` -in order to have the file imported. 
Alternatively, the input file can be converted to **line-by-line format** -manually. - - -### Importing CSV data - -Data can also be imported from a CSV file. An example file can be found [here](http://jsteemann.github.io/downloads/code/git-commits.csv). - -The `--type` parameter for the import command must now be set to `csv`: - -```bash -# download file -wget http://jsteemann.github.io/downloads/code/git-commits.csv - -# actually import data -arangoimport --file git-commits.csv --type csv --collection commits --create-collection true -``` - -For the CSV import, the first line in the input file has a special meaning: every value listed in the -first line will be treated as an attribute name for the values in all following lines. All following -lines should also have the same number of "columns". - -"columns" inside the CSV input file can be left empty though. If a "column" is left empty in a line, -then this value will be omitted for the import so the respective attribute will not be set in the imported -document. Note that values from the input file that are enclosed in double quotes will always be imported as -strings. To import numeric values, boolean values or the `null` value, don't enclose these values in quotes in -the input file. Note that leading zeros in numeric values will be removed. Importing numbers with leading -zeros will only work when putting the numbers into strings. - -Here is an example CSV file: - -```plain -"author","time","sha","message" -"Frank Celler","2011-10-26 08:42:49 +0200","c413859392a45873936cbe40797970f8eed93ff9","first commit" -"Frank Celler","2011-10-26 21:32:36 +0200","10bb77b8cc839201ff59a778f0c740994083c96e","initial release" -... -``` - -`arangoimport` supports Windows (CRLF) and Unix (LF) line breaks. Line breaks might also occur inside values -that are enclosed with the quote character. - -The default separator for CSV files is the comma. It can be changed using the `--separator` parameter -when invoking `arangoimport`. The quote character defaults to the double quote (**"**). To use a literal double -quote inside a "column" in the import data, use two double quotes. To change the quote character, use the -`--quote` parameter. To use a backslash for escaping quote characters, please set the option `--backslash-escape` -to `true`. - - -### Changing the database and server endpoint - -By default, `arangoimport` will connect to the default database on `127.0.0.1:8529` with a user named -`root`. To change this, use the following parameters: - -- `server.database`: name of the database to use when importing (default: `_system`) -- `server.endpoint`: address of the ArangoDB server (default: `tcp://127.0.0.1:8529`) - - -### Using authentication - -`arangoimport` will by default send an username `root` and an empty password to the ArangoDB -server. This is ArangoDB's default configuration, and it should be changed. To make `arangoimport` -use a different username or password, the following command-line arguments can be used: - -- `server.username`: username, used if authentication is enabled on server -- `server.password`: password for user, used if authentication is enabled on server - -The password argument can also be omitted in order to avoid having it saved in the shell's -command-line history. When specifying a username but omitting the password parameter, -`arangoimport` will prompt for a password. - - -### Additional parameters - -By default, `arangoimport` will import data into the specified collection but will not touch -existing data. 
Often it is convenient to first remove all data from a collection and then run -the import. `arangoimport` supports this with the optional `--overwrite` flag. When setting it to -`true`, all documents in the collection will be removed prior to the import. - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #arangoimport #import diff --git a/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md b/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md deleted file mode 100644 index e906b443961b..000000000000 --- a/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md +++ /dev/null @@ -1,109 +0,0 @@ -Migration from ArangoDB 2.8 to 3.0 -================================== - -Problem -------- - -I want to use ArangoDB 3.0 from now on but I still have data in ArangoDB 2.8. -I need to migrate my data. I am running an ArangoDB 3.0 cluster (and -possibly a cluster with ArangoDB 2.8 as well). - -Solution --------- - -The internal data format changed completely from ArangoDB 2.8 to 3.0, -therefore you have to dump all data using `arangodump` and then -restore it to the new ArangoDB instance using `arangorestore`. - -General instructions for this procedure can be found -[in the manual](../../Manual/Upgrading/VersionSpecific/Upgrading30.html). -Here, we cover some additional details about the cluster case. - -### Dumping the data in ArangoDB 2.8 - -Basically, dumping the data works with the following command (use `arangodump` -from your ArangoDB 2.8 distribution!): - - arangodump --server.endpoint tcp://localhost:8530 --output-directory dump - -or a variation of it, for details see the above mentioned manual page and -[this section](https://docs.arangodb.com/2.8/HttpBulkImports/Arangodump.html). -If your ArangoDB 2.8 instance is a cluster, simply use one of the -coordinator endpoints as the above `--server.endpoint`. - -### Restoring the data in ArangoDB 3.0 - -The output consists of JSON files in the output directory, two for each -collection, one for the structure and one for the data. The data format -is 100% compatible with ArangoDB 3.0, except that ArangoDB 3.0 has -an additional option in the structure files for synchronous replication, -namely the attribute `replicationFactor`, which is used to specify, -how many copies of the data for each shard are kept in the cluster. - -Therefore, you can simply use this command (use the `arangorestore` from -your ArangoDB 3.0 distribution!): - - arangorestore --server.endpoint tcp://localhost:8530 --input-directory dump - -to import your data into your new ArangoDB 3.0 instance. See -[this page](../../Manual/Programs/Arangorestore/index.html) -for details on the available command line options. If your ArangoDB 3.0 -instance is a cluster, then simply use one of the coordinators as -`--server.endpoint`. - -That is it, your data is migrated. - -### Controlling the number of shards and the replication factor - -This procedure works for all four combinations of single server and cluster -for source and destination respectively. If the target is a single server -all simply works. - -So it remains to explain how one controls the number of shards and the -replication factor if the destination is a cluster. - -If the source was a cluster, `arangorestore` will use the same number -of shards as before, if you do not tell it otherwise. 
Since ArangoDB 2.8 -does not have synchronous replication, it does not produce dumps -with the `replicationFactor` attribute, and so `arangorestore` will -use replication factor 1 for all collections. If the source was a -single server, the same will happen, additionally, `arangorestore` -will always create collections with just a single shard. - -There are essentially 3 ways to change this behavior: - - 1. The first is to create the collections explicitly on the - ArangoDB 3.0 cluster, and then set the `--create-collection false` flag. - In this case you can control the number of shards and the replication - factor for each collection individually when you create them. - 2. The second is to use `arangorestore`'s options - `--default-number-of-shards` and `--default-replication-factor` - (this option was introduced in Version 3.0.2) - respectively to specify default values, which are taken if the - dump files do not specify numbers. This means that all such - restored collections will have the same number of shards and - replication factor. - 3. If you need more control you can simply edit the structure files - in the dump. They are simply JSON files, you can even first - use a JSON pretty printer to make editing easier. For the - replication factor you simply have to add a `replicationFactor` - attribute to the `parameters` subobject with a numerical value. - For the number of shards, locate the `shards` subattribute of the - `parameters` attribute and edit it, such that it has the right - number of attributes. The actual names of the attributes as well - as their values do not matter. Alternatively, add a `numberOfShards` - attribute to the `parameters` subobject, this will override the - `shards` attribute (this possibility was introduced in Version - 3.0.2). - -Note that you can remove individual collections from your dump by -deleting their pair of structure and data file in the dump directory. -In this way you can restore your data in several steps or even -parallelize the restore operation by running multiple `arangorestore` -processes concurrently on different dump directories. You should -consider using different coordinators for the different `arangorestore` -processes in this case. - -All these possibilities together give you full control over the sharding -layout of your data in the new ArangoDB 3.0 cluster. - diff --git a/Documentation/Books/Cookbook/Administration/NSISSilentMode.md b/Documentation/Books/Cookbook/Administration/NSISSilentMode.md deleted file mode 100644 index 87cc997e3215..000000000000 --- a/Documentation/Books/Cookbook/Administration/NSISSilentMode.md +++ /dev/null @@ -1,40 +0,0 @@ -Installing ArangoDB unattended under Windows -============================================ - -Problem -------- -The Available NSIS based installer requires user interaction; This may be unwanted for unattended install i.e. via Chocolatey. - -Solution --------- -The NSIS installer now offers a ["Silent Mode"](http://nsis.sourceforge.net/Docs/Chapter3.html) which allows you to run it non interactive -and specify all choices available in the UI via commandline Arguments. - -The options are as all other NSIS options specified in the form of `/OPTIONNAME=value`. - -## Supported options - -*For Installation*: - - - PASSWORD - Set the database password. Newer versions will also try to evaluate a PASSWORD environment variable - - - INSTDIR - Installation directory. A directory where you have access to. - - DATABASEDIR - Database directory. 
A directory where you have access to and the databases should be created. - - APPDIR - Foxx Services directory. A directory where you have access to. - - INSTALL_SCOPE_ALL: - - 1 - AllUsers +Service - launch the arangodb service via the Windows Services, install it for all users - - 0 - SingleUser - install it into the home of this user, don'launch a service. Eventually create a desktop Icon so the user can do this. - - DESKTOPICON - [0/1] whether to create Icons on the desktop to reference arangosh and the webinterface - - PATH - - 0 - don't alter the PATH environment at all - - 1: - - INSTALL_SCOPE_ALL = 1 add it to the path for all users - - INSTALL_SCOPE_ALL = 0 add it to the path of the currently logged in users - - STORAGE_ENGINE - [auto/mmfiles/rocksdb] which storage engine to use (arangodb 3.2 onwards) - -*For Uninstallation*: - - PURGE_DB - [0/1] if set to 1 the database files ArangoDB created during its lifetime will be removed too. - -## Generic Options derived from NSIS - - - S - silent - don't open the UI during installation diff --git a/Documentation/Books/Cookbook/Administration/README.md b/Documentation/Books/Cookbook/Administration/README.md deleted file mode 100644 index d206ad7a6ea6..000000000000 --- a/Documentation/Books/Cookbook/Administration/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Administration -============== - -- [Using Authentication](Authentication.md) - -- [Importing Data](ImportingData.md) - -- [Replicating Data](ReplicatingData.md) - -- [Installing ArangoDB unattended under Windows](NSISSilentMode.md) - -- [Migrating 2.8 to 3.0](Migrate2.8to3.0.md) - -- [A function to show grants in Arangosh](ShowUsersGrants.md) diff --git a/Documentation/Books/Cookbook/Administration/ReplicatingData.md b/Documentation/Books/Cookbook/Administration/ReplicatingData.md deleted file mode 100644 index 5c3ffc8d9fbe..000000000000 --- a/Documentation/Books/Cookbook/Administration/ReplicatingData.md +++ /dev/null @@ -1,184 +0,0 @@ -Replicating data from different databases -========================================= - -Problem -------- - -You have two or more different databases with various data respectively collections in each one of this, but you want your data to be collected at one place. - -**Note**: For this solution you need at least Arango 2.0 and you must run the script in every database you want to be collect data from. - -Solution --------- - -First of all you have to start a server on endpoint: - -``` -arangod --server.endpoint tcp://127.0.0.1:8529 -``` - -Now you have to create two collections and name them *data* and *replicationStatus* - -```js -db._create("data"); -db._create("replicationStatus"); -``` - -Save the following script in a file named *js/common/modules/org/mysync.js* - -```js -var internal = require("internal"); - -// maximum number of changes that we can handle -var maxChanges = 1000; - -// URL of central node -var transferUrl = "http://127.0.0.1:8599/_api/import?collection=central&type=auto&createCollection=true&complete=true"; - -var transferOptions = { - method: "POST", - timeout: 60 -}; - -// the collection that keeps the status of what got replicated to central node -var replicationCollection = internal.db.replicationStatus; - -// the collection containing all data changes -var changesCollection = internal.db.data; - -function keyCompare (l, r) { - if (l.length != r.length) { - return l.length - r.length < 0 ? -1 : 1; - } - - // length is equal - for (i = 0; i < l.length; ++i) { - if (l[i] != r[i]) { - return l[i] < r[i] ? 
-1 : 1; - } - } - - return 0; -}; - -function logger (msg) { - "use strict"; - - require("console").log("%s", msg); -} - -function replicate () { - "use strict"; - - var key = "status"; // const - - var status, newStatus; - try { - // fetch the previous replication state - status = replicationCollection.document(key); - newStatus = { _key: key, lastKey: status.lastKey }; - } - catch (err) { - // no previous replication state. start from the beginning - newStatus = { _key: key, lastKey: "0" }; - } - - // fetch the latest changes (need to reverse them because `last` returns newest changes first) - var changes = changesCollection.last(maxChanges).reverse(), change; - var transfer = [ ]; - for (change in changes) { - if (changes.hasOwnProperty(change)) { - var doc = changes[change]; - if (keyCompare(doc._key, newStatus.lastKey) <= 0) { - // already handled in a previous replication run - continue; - } - - // documents we need to transfer - // if necessary, we could rewrite the documents here, e.g. insert - // extra values, create client-specific keys etc. - transfer.push(doc); - - if (keyCompare(doc._key, newStatus.lastKey) > 0) { - // keep track of highest key - newStatus.lastKey = doc._key; - } - } - } - - if (transfer.length === 0) { - // nothing to do - logger("nothing to transfer"); - return; - } - - logger("transferring " + transfer.length + " document(s)"); - - // now transfer the documents to the remote server - var result = internal.download(transferUrl, JSON.stringify(transfer), transferOptions); - - if (result.code >= 200 && result.code <= 202) { - logger("central server accepted the documents: " + JSON.stringify(result)); - } - else { - // error - logger("central server did not accept the documents: " + JSON.stringify(result)); - throw "replication error"; - } - - // update the replication state - if (status) { - // need to update the previous replication state - replicationCollection.update(key, newStatus); - } - else { - // need to insert the replication state (1st time) - replicationCollection.save(newStatus); - } - - logger("deleting old documents"); - - // finally remove all elements that we transferred successfully from the changes collection - // no need to keep them - transfer.forEach(function (k) { - changesCollection.remove(k); - }); -} - -exports.execute = function (param) { - "use strict"; - - logger("replication wake up"); - replicate(); - logger("replication shutdown"); -}; -``` - -Afterwards change the URL of the central node in the script to the one you chosen before - e.g. *tcp://127.0.0.1:8599* - -Now register the script as a recurring action: - -```js -require("internal").definePeriodic(1, 10, "org/arangodb/mysync", "execute", ""); -``` - -**Note**: At this point you can change the time the script will be executed. - -Comment -------- - -The server started on endpoint will be the central node. It collects changes from the local node by replicating its data. -The script will pick up everything that has been changed since the last alteration in your *data* collection. -Every 10 seconds - or the time you chosen - the script will be executed and send the changed data to the central -node where it will be imported into a collection named *central*. -After that the transferred data will be removed from the *data* collection. 
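-
-For reference, the transfer the script performs is a plain bulk import call against the central
-node; a roughly equivalent manual request (URL taken from the example script, payload made up)
-would be:
-
-```
-curl -X POST --data-binary '[{"value": 1}, {"value": 2}]' \
-  "http://127.0.0.1:8599/_api/import?collection=central&type=auto&createCollection=true&complete=true"
-```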
- -If you want to test your script simply add some data to your *data* collection - e.g.: - -```js -for (i = 0; i < 100; ++i) db.data.save({ value: i }); -``` - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags:** #database #collection \ No newline at end of file diff --git a/Documentation/Books/Cookbook/Administration/Replication/README.md b/Documentation/Books/Cookbook/Administration/Replication/README.md deleted file mode 100644 index 6a67150809f5..000000000000 --- a/Documentation/Books/Cookbook/Administration/Replication/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Replication - -This Section includes cookbook recipes related to the *Replication* topic. - -* [Replicating data from different databases](../ReplicatingData.md) -* [Speeding up slave initialization](ReplicationFromBackup.md) diff --git a/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md b/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md deleted file mode 100644 index 1d86d1851714..000000000000 --- a/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md +++ /dev/null @@ -1,257 +0,0 @@ -# Speeding up slave initialization - -## Problem - -You have a very big database and want to set up a `master-slave` replication between two or more ArangoDB instances. Transfering the entire database over the network may take a long time, if the database is big. In order to speed-up the replication initialization process the **slave** can be initialized using a backup of the **master**. - -For the following example setup, we will use the instance with endpoint `tcp://master.domain.org:8529` as master, and the instance with endpoint `tcp://slave.domain.org:8530` as slave. - -The goal is to have all data from the database ` _system` on master replicated to the database `_system` on the slave (the same process can be applied for other databases) . - -## Solution - -First of all you have to start the master server, using a command like the above: - -```sh -arangod --server.endpoint tcp://master.domain.org:8529 -``` - -Depending on your storage engine you also want to adjust the following options: - -For MMFiles: - -```sh ---wal.historic-logfiles (maximum number of historic logfiles to keep after collection - (default: 10)) -``` - -For RocksDB: - -```sh ---rocksdb.wal-file-timeout (timeout after which unused WAL files are deleted - in seconds (default: 10)) -``` - -The options above prevent the premature removal of old WAL files from the master, and are useful in case intense write operations happen on the master while you are initializing the slave. In fact, if you do not tune these options, what can happen is that the master WAL files do not include all the write operations happened after the backup is taken. This may lead to situations in which the initialized slave is missing some data, or fails to start. - -Now you have to create a dump from the master using the tool `arangodump`: - -```sh -arangodump --output-directory "dump" --server.endpoint tcp://master.domain.org:8529 -``` - -Please adapt the `arangodump` command to your specific case. - -The following is a possible `arangodump` output: - -```sh -Server version: 3.3 -Connected to ArangoDB 'tcp://master.domain.org:8529', database: '_system', username: 'root' -Writing dump to output directory 'dump' -Last tick provided by server is: 37276350 -# Dumping document collection 'TestNums'... -# Dumping document collection 'TestNums2'... -# Dumping document collection 'frenchCity'... 
-# Dumping document collection 'germanCity'... -# Dumping document collection 'persons'... -# Dumping edge collection 'frenchHighway'... -# Dumping edge collection 'germanHighway'... -# Dumping edge collection 'internationalHighway'... -# Dumping edge collection 'knows'... -Processed 9 collection(s), wrote 1298855504 byte(s) into datafiles, sent 32 batch(es) -``` - -In line *4* the last server `tick` is displayed. This value will be useful when we will start the replication, to have the `replication-applier` start replicating exactly from that `tick`. - -Next you have to start the slave: - -```sh -arangod --server.endpoint tcp://slave.domain.org:8530 -``` - -If you are running master and slave on the same server (just for test), please make sure you give your slave a different data directory. - -Now you are ready to restore the dump with the tool `arangorestore`: - -```sh -arangorestore --input-directory "dump" --server.endpoint tcp://slave.domain.org:8530 -``` - -Again, please adapt the command above in case you are using a database different than `_system`. - -Once the restore is finished there are two possible approaches to start the replication. - -### Approach 1: All-in-one setup - -Start replication on the slave with `arangosh` using the following command: - -```sh -arangosh --server.endpoint tcp://slave.domain.org:8530 -``` - -```js -db._useDatabase("_system"); -require("@arangodb/replication").setupReplication({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false, - includeSystem: false, - incremental: true, - autoResync: true -}); -``` - -The following is the printed output: - -```sh -still synchronizing... last received status: 2017-12-06T14:06:25Z: fetching collection keys for collection 'TestNums' from /_api/replication/keys/keys?collection=7173693&to=57482456&serverId=24282855553110&batchId=57482462 -still synchronizing... last received status: 2017-12-06T14:06:25Z: fetching collection keys for collection 'TestNums' from /_api/replication/keys/keys?collection=7173693&to=57482456&serverId=24282855553110&batchId=57482462 -[...] -still synchronizing... last received status: 2017-12-06T14:07:13Z: sorting 10000000 local key(s) for collection 'TestNums' -still synchronizing... last received status: 2017-12-06T14:07:13Z: sorting 10000000 local key(s) for collection 'TestNums' -[...] -still synchronizing... last received status: 2017-12-06T14:09:10Z: fetching master collection dump for collection 'TestNums3', type: document, id 37276943, batch 2, markers processed: 15278, bytes received: 2097258 -still synchronizing... last received status: 2017-12-06T14:09:18Z: fetching master collection dump for collection 'TestNums5', type: document, id 37276973, batch 5, markers processed: 123387, bytes received: 17039688 -[...] -still synchronizing... last received status: 2017-12-06T14:13:49Z: fetching master collection dump for collection 'TestNums5', type: document, id 37276973, batch 132, markers processed: 9641823, bytes received: 1348744116 -still synchronizing... 
last received status: 2017-12-06T14:13:59Z: fetching collection keys for collection 'frenchCity' from /_api/replication/keys/keys?collection=27174045&to=57482456&serverId=24282855553110&batchId=57482462
-{
-  "state" : {
-    "running" : true,
-    "lastAppliedContinuousTick" : null,
-    "lastProcessedContinuousTick" : null,
-    "lastAvailableContinuousTick" : null,
-    "safeResumeTick" : null,
-    "progress" : {
-      "time" : "2017-12-06T14:13:59Z",
-      "message" : "send batch finish command to url /_api/replication/batch/57482462?serverId=24282855553110",
-      "failedConnects" : 0
-    },
-    "totalRequests" : 0,
-    "totalFailedConnects" : 0,
-    "totalEvents" : 0,
-    "totalOperationsExcluded" : 0,
-    "lastError" : {
-      "errorNum" : 0
-    },
-    "time" : "2017-12-06T14:13:59Z"
-  },
-  "server" : {
-    "version" : "3.3.devel",
-    "serverId" : "24282855553110"
-  },
-  "endpoint" : "tcp://master.domain.org:8529",
-  "database" : "_system"
-}
-```
-
-This is the same command you would use to start replication even without taking a backup first. The difference in this case is that the data already present on the slave (restored from the backup) is not transferred over the network from the master to the slave again.
-
-The command above will only check that the data already present on the slave is in sync with the master. After this check, the `replication-applier` will make sure that all write operations that happened on the master after the backup are replicated to the slave.
-
-While this approach is definitely faster than transferring the whole database over the network, it can still require some time because of the sync check that is performed.
-
-### Approach 2: Apply replication by tick
-
-In this approach, the sync check described above is not performed. As a result, this approach is faster, since the existing slave data is not checked. Write operations are executed starting from the `tick` you provide and continue with the master's available `ticks`.
-
-This is still a safe way to start replication, as long as the correct `tick` is passed.
-
-As previously mentioned, the last `tick` provided by the master is displayed when using `arangodump`. In our example the last tick was **37276350**.
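Optionally, before starting the applier from this tick, you can verify on the master that its write-ahead log still covers it. A minimal sketch (run in `arangosh` connected to the master; it assumes the `logger.state()` call of the replication module is available in your version):

```js
var replication = require("@arangodb/replication");

// inspect the master's replication logger state; the reported log range
// must still cover the dump tick (37276350 in this example), otherwise
// the applier cannot resume from it without missing operations
var state = replication.logger.state().state;
print("last log tick on master: " + state.lastLogTick);
```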
- -First of all you have to apply the properties of the replication, using `arangosh` on the slave: - -```sh -arangosh --server.endpoint tcp://slave.domain.org:8530 -``` - -```js -db._useDatabase("_system"); -require("@arangodb/replication").applier.properties({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false, - includeSystem: false, - incremental: true, - autoResync: true}); -``` - -Then you can start the replication with the last provided `logtick` of the master (output of `arangodump`): - -```js -require("@arangodb/replication").applier.start(37276350) -``` - -The following is the printed output: - -```sh -{ - "state" : { - "running" : true, - "lastAppliedContinuousTick" : null, - "lastProcessedContinuousTick" : null, - "lastAvailableContinuousTick" : null, - "safeResumeTick" : null, - "progress" : { - "time" : "2017-12-06T13:26:04Z", - "message" : "applier initially created for database '_system'", - "failedConnects" : 0 - }, - "totalRequests" : 0, - "totalFailedConnects" : 0, - "totalEvents" : 0, - "totalOperationsExcluded" : 0, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2017-12-06T13:33:25Z" - }, - "server" : { - "version" : "3.3.devel", - "serverId" : "176090204017635" - }, - "endpoint" : "tcp://master.domain.org:8529", - "database" : "_system" -} -``` - -After the replication has been started with the command above, you can use the `applier.state` command to check how far the last applied `tick` on the slave is far from the last available master `tick`: - -```sh -require("@arangodb/replication").applier.state() -{ - "state" : { - "running" : true, - "lastAppliedContinuousTick" : "42685113", - "lastProcessedContinuousTick" : "42685113", - "lastAvailableContinuousTick" : "57279944", - "safeResumeTick" : "37276974", - "progress" : { - "time" : "2017-12-06T13:35:25Z", - "message" : "fetching master log from tick 42685113, first regular tick 37276350, barrier: 0, open transactions: 1", - "failedConnects" : 0 - }, - "totalRequests" : 190, - "totalFailedConnects" : 0, - "totalEvents" : 2704032, - "totalOperationsExcluded" : 0, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2017-12-06T13:35:25Z" - }, - "server" : { - "version" : "3.3.devel", - "serverId" : "176090204017635" - }, - "endpoint" : "tcp://master.domain.org:8529", - "database" : "_system" -} -``` - - -**Author:** [Max Kernbach](https://github.com/maxkernbach) - -**Tags:** #database #replication #arangodump #arangorestore diff --git a/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md b/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md deleted file mode 100644 index 57e7f82bb49d..000000000000 --- a/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md +++ /dev/null @@ -1,48 +0,0 @@ -Show grants function -==================== - -Problem -------- - -I'm looking for user database grants - - -Solution --------- -Create a global function in your _.arangosh.rc_ file like this: -``` -global.show_grants = function () { - let stmt; - stmt=db._createStatement({"query": "FOR u in _users RETURN {\"user\": u.user, \"databases\": u.databases}"}); - console.log(stmt.execute().toString()); -}; -``` -Now when you enter in arangosh, you can call **show_grants()** function. 
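For example (a minimal usage sketch; the function name is the one defined above):

```js
// prints every user together with the databases it is granted access to
show_grants();
```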
- -#### Function out example -``` -[object ArangoQueryCursor, count: 3, hasMore: false] - - -[ - { - "user" : "foo", - "databases" : { - "_system" : "rw", - "bar" : "rw" - } - }, - { - "user" : "foo2", - "databases" : { - "bar" : "rw" - } - }, - { - "user" : "root", - "databases" : { - "*" : "rw" - } - } -] -``` diff --git a/Documentation/Books/Cookbook/Cloud/DockerContainer.md b/Documentation/Books/Cookbook/Cloud/DockerContainer.md deleted file mode 100644 index d525bf4412e1..000000000000 --- a/Documentation/Books/Cookbook/Cloud/DockerContainer.md +++ /dev/null @@ -1,16 +0,0 @@ -How to run ArangoDB in a Docker container -========================================= - -Problem -------- - -How do you make ArangoDB run in a Docker container? - -Solution --------- - -ArangoDB is now available as an [official repository in the Docker Hub](https://hub.docker.com/_/arangodb/) (@see documentation there). - -**Author:** [Frank Celler](https://github.com/fceller) - -**Tags:** #docker #howto diff --git a/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md b/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md deleted file mode 100644 index 6a1056ebe4f4..000000000000 --- a/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md +++ /dev/null @@ -1,144 +0,0 @@ -ArangoDB, NodeJS and Docker -=========================== - -Problem -------- - -I'm looking for a head start in using the ArangoDB docker image. - -Solution --------- - -We will use the guesser game for ArangoDB from - -``` -https://github.com/arangodb/guesser -``` - -This is a simple game guessing animals or things. It learns while playing -and stores the learned information in an ArangoDB instance. The game is written using the -express framework. - -**Note**: You need to switch to the docker branch. - -The game has the two components - -* front-end with node.js and express -* back-end with ArangoDB and Foxx - -Therefore the guesser game needs two docker containers, one container for the node.js -server to run the front-end code and one container for ArangoDB for the storage back-end. - -### Node Server - -The game is itself can be install via NPM or from github. There is an image available from -dockerhub called `arangodb/example-guesser` which is based on the Dockerfile -from github. - -You can either build the docker container locally or simply use the available one from -docker hub. - -``` -unix> docker run -p 8000:8000 -e nolink=1 arangodb/example-guesser -Starting without a database link -Using DB-Server http://localhost:8529 -Guesser app server listening at http://0.0.0.0:8000 -``` - -This will start-up node and the guesser game is available on port 8000. Now point your -browser to port 8000. You should see the start-up screen. However, without a storage -backend it will be pretty useless. Therefore, stop the container and proceed with the next -step. - -If you want to build the container locally, check out the guesser game from - -``` -https://github.com/arangodb/example-guesser -``` - -Switch into the `docker/node` subdirectory and execute `docker build .`. - -### ArangoDB - -ArangoDB is already available on docker, so we start an instance - -``` -unix> docker run --name arangodb-guesser arangodb/arangodb -show all options: - docker run -e help=1 arangodb - -starting ArangoDB in stand-alone mode -``` - -That's it. Note that in an productive environment you would need to attach a storage -container to it. We ignore this here for the sake of simplicity. 
- -### Guesser Game - - -#### Some Testing - -Use the guesser game image to start the ArangoDB shell and link the ArangoDB instance to -it. - -``` -unix> docker run --link arangodb-guesser:db-link -it arangodb/example-guesser arangosh --server.endpoint @DB_LINK_PORT_8529_TCP@ -``` - -The parameter `--link arangodb-guesser:db-link` links the running ArangoDB into the -application container and sets an environment variable `DB_LINK_PORT_8529_TCP` which -points to the exposed port of the ArangoDB container: - -``` -DB_LINK_PORT_8529_TCP=tcp://172.17.0.17:8529 -``` - -Your IP may vary. The command `arangosh ...` at the end of docker command executes the -ArangoDB shell instead of the default node command. - -``` -Welcome to arangosh 2.3.1 [linux]. Copyright (c) ArangoDB GmbH -Using Google V8 3.16.14 JavaScript engine, READLINE 6.3, ICU 52.1 - -Pretty printing values. -Connected to ArangoDB 'tcp://172.17.0.17:8529' version: 2.3.1, database: '_system', username: 'root' - -Type 'tutorial' for a tutorial or 'help' to see common examples -arangosh [_system]> -``` - -The important line is - -``` -Connected to ArangoDB 'tcp://172.17.0.17:8529' version: 2.3.1, database: '_system', username: 'root' -``` - -It tells you that the application container was able to connect to the database -back-end. Press `Control-D` to exit. - -#### Start Up The Game - -Ready to play? Start the front-end container with the database link and initialize the database. - -``` -unix> docker run --link arangodb-guesser:db-link -p 8000:8000 -e init=1 arangodb/example-guesser -``` - -Use your browser to play the game at the address http://127.0.0.1:8000/. -The - -``` --e init=1 -``` - -is only need the first time you start-up the front-end and only once. The next time you -run the front-end or if you start a second front-end server use - -``` -unix> docker run --link arangodb-guesser:db-link -p 8000:8000 arangodb/example-guesser -``` - - -**Author**: [Frank Celler](https://github.com/fceller) - -**Tags**: #docker diff --git a/Documentation/Books/Cookbook/Cloud/README.md b/Documentation/Books/Cookbook/Cloud/README.md deleted file mode 100644 index 8e1e78ae246b..000000000000 --- a/Documentation/Books/Cookbook/Cloud/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Cloud, DCOS and Docker -====================== - -Docker ------- - -- [Docker ArangoDB](DockerContainer.md) -- [Docker with NodeJS App](NodeJsDocker.md) - diff --git a/Documentation/Books/Cookbook/Compiling/Debian.md b/Documentation/Books/Cookbook/Compiling/Debian.md deleted file mode 100644 index 53de7178f8d1..000000000000 --- a/Documentation/Books/Cookbook/Compiling/Debian.md +++ /dev/null @@ -1,251 +0,0 @@ -Compiling on Debian -=================== - -Problem -------- - -You want to compile and run the devel branch, for example to test a bug fix. In this example the system is Debian based. - -Solution --------- - -This solution was made using a fresh Debian Testing machine on Amazon EC2. For completeness, the steps pertaining to AWS are also included in this recipe. - -### Launch the VM - -*Optional* - -Login to your AWS account and launch an instance of Debian Testing. I used an 'm3.xlarge' since that has a bunch of cores, more than enough memory, optimized network and the instance store is on SSDs which can be switched to provisioned IOPs. 
- -The Current AMI ID's can be found in the Debian Wiki: https://wiki.debian.org/Cloud/AmazonEC2Image/Jessie - -### Upgrade to the very latest version - -*Optional* - -Once your EC2 instance is up, login ad `admin` and `sudo su` to become `root`. - -First, we remove the backports and change the primary sources.list - -```bash -rm -rf /etc/apt/sources.list.d -echo "deb http://http.debian.net/debian testing main contrib" > /etc/apt/sources.list -echo "deb-src http://http.debian.net/debian testing main contrib" >> /etc/apt/sources.list -``` - -Update and upgrade the system. Make sure you don't have any broken/unconfigured packages. Sometimes you need to run safe/full upgrade more than once. When you're done, reboot. - -```bash -apt-get install aptitude -aptitude -y update -aptitude -y safe-upgrade -aptitude -y full-upgrade -reboot -``` - -### Install build dependencies - -*Mandatory* - -Before you can build ArangoDB, you need a few packages pre-installed on your system. - -Login again and install them. - -```bash -sudo aptitude -y install git-core \ - build-essential \ - libssl-dev \ - libjemalloc-dev \ - cmake \ - python2.7 \ -sudo aptitude -y install libldap2-dev # Enterprise Edition only -``` - -### -Download the Source - -Download the latest source using ***git***: - - unix> git clone git://github.com/arangodb/arangodb.git - -This will automatically clone the **devel** branch. - -Note: if you only plan to compile ArangoDB locally and do not want to modify or push -any changes, you can speed up cloning substantially by using the *--single-branch* and -*--depth* parameters for the clone command as follows: - - unix> git clone --single-branch --depth 1 git://github.com/arangodb/arangodb.git - -### Setup - -Switch into the ArangoDB directory - - unix> cd arangodb - unix> mkdir build - unix> cd build - -In order to generate the build environment please execute - - unix> cmake .. - -to setup the Makefiles. This will check the various system characteristics and -installed libraries. If you installed the compiler in a non standard location, you may need to specify it: - - cmake -DCMAKE_C_COMPILER=/opt/bin/gcc -DCMAKE_CXX_COMPILER=/opt/bin/g++ .. - -If you compile on macOS, you should add the following options to the cmake command: - - cmake .. -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl -DCMAKE_OSX_DEPLOYMENT_TARGET=10.11 - -If you also plan to make changes to the source code of ArangoDB, you may want to compile with the -`Debug` build type: - - cmake .. -DCMAKE_BUILD_TYPE=Debug - -The `Debug` target enables additional sanity checks etc. which would slow down production -binaries. If no build type is specified, ArangoDB will be compiled with build type `RelWithDebInfo`, -which is a compromise between good performance and medium debugging experience. - -Other options valuable for development: - - -DUSE_MAINTAINER_MODE=On - -Needed if you plan to make changes to AQL language (which is implemented using a lexer and parser -files in `arangod/Aql/grammar.y` and `arangod/Aql/tokens.ll`) or if you want to enable runtime -assertions. To use the maintainer mode, your system has to contain the tools FLEX and BISON. - - -DUSE_BACKTRACE=On - -Use this option if you want to have C++ stacktraces attached to your exceptions. This can be useful -to more quickly locate the place where an exception or an assertion was thrown. Note that this -option will slow down the produces binaries a bit and requires building with maintainer mode. 
- - -DUSE_OPTIMIZE_FOR_ARCHITECTURE=On - -This will optimize the binary for the target architecture, potentially enabling more compiler -optimizations, but making the resulting binary less portable. - -ArangoDB will then automatically use the configuration from file *etc/relative/arangod.conf*. - - -DUSE_FAILURE_TESTS=On - -This option activates additional code in the server that intentionally makes the -server crash or misbehave (e.g. by pretending the system ran out of memory) when certain tests -are run. This option is useful for writing tests. - - -DUSE_JEMALLOC=Off - -By default ArangoDB will be built with a bundled version of the JEMalloc allocator. This -however will not work when using runtime analyzers such as ASAN or Valgrind. In order to use -these tools for instrumenting an ArangoDB binary, JEMalloc must be turned off during compilation. - -### shared memory -Gyp is used as makefile generator by V8. Gyp requires shared memory to be available, -which may not if you i.e. compile in a chroot. You can make it available like this: - - none /opt/chroots/ubuntu_precise_x64/dev/shm tmpfs rw,nosuid,nodev,noexec 0 2 - devpts /opt/chroots/ubuntu_precise_x64/dev/pts devpts gid=5,mode=620 0 0 - - -### Compilation - -Compile the programs (server, client, utilities) by executing - - make - -in the build subdirectory. This will compile ArangoDB and create the binary executable -in file `build/bin/arangod`. - -### Starting and testing - -Check the binary by starting it using the command line. - - unix> build/bin/arangod -c etc/relative/arangod.conf --server.endpoint tcp://127.0.0.1:8529 /tmp/database-dir - -This will start up the ArangoDB and listen for HTTP requests on port 8529 bound -to IP address 127.0.0.1. You should see the startup messages similar to the -following: - -``` -2016-06-01T12:47:29Z [29266] INFO ArangoDB xxx ... -2016-06-10T12:47:29Z [29266] INFO using endpoint 'tcp://127.0.0.1.8529' for non-encrypted requests -2016-06-01T12:47:30Z [29266] INFO Authentication is turned on -2016-60-01T12:47:30Z [29266] INFO ArangoDB (version xxx) is ready for business. Have fun! -``` - -If it fails with a message about the database directory, please make sure the -database directory you specified exists and can be written into. - -Use your favorite browser to access the URL - - http://127.0.0.1:8529/ - -This should bring up ArangoDB's web interface. - -### Re-building ArangoDB after an update - -To stay up-to-date with changes made in the main ArangoDB repository, you will -need to pull the changes from it and re-run `make`. - -Normally, this will be as simple as follows: - - unix> git pull - unix> (cd build && make) - -From time to time there will be bigger structural changes in ArangoDB, which may -render the old Makefiles invalid. Should this be the case and `make` complains -about missing files etc., the following commands should fix it: - - - unix> rm -rf build/* - unix> cd build && cmake .. - unix> (cd build && make) - -Note that the above commands will run a full rebuild of ArangoDB and all -of its third-party components. That will take a while to complete. - -### Installation - -In a local development environment it is not necessary to install ArangoDB -somewhere, because it can be started from within the source directory as -shown above. 
- -If there should be the need to install ArangoDB, execute the following command: - - (cd build && sudo make install) - -The server will by default be installed in - - /usr/local/sbin/arangod - -The configuration file will be installed in - - /usr/local/etc/arangodb3/arangod.conf - -The database will be installed in - - /usr/local/var/lib/arangodb3 - -The ArangoShell will be installed in - - /usr/local/bin/arangosh - -You should add an arangodb user and group (as root), plus make sure it owns these directories: - - useradd -g arangodb arangodb - chown -R arangodb:arangodb /usr/local/var/lib/arangodb3-apps/ - chown -R arangodb:arangodb /tmp/database-dir/ - -**Note:** The installation directory will be different if you use one of the -`precompiled` packages. Please check the default locations of your operating -system, e. g. `/etc` and `/var/lib`. - -When upgrading from a previous version of ArangoDB, please make sure you inspect -ArangoDB's log file after an upgrade. It may also be necessary to start ArangoDB -with the *--database.auto-upgrade* parameter once to perform required upgrade or -initialization tasks. - -**Author:** [Patrick Huber](https://github.com/stackmagic) -**Author:** [Wilfried Goesgens](https://github.com/dothebart) - -**Tags:** #debian #driver diff --git a/Documentation/Books/Cookbook/Compiling/README.md b/Documentation/Books/Cookbook/Compiling/README.md deleted file mode 100644 index 271df1a51b7a..000000000000 --- a/Documentation/Books/Cookbook/Compiling/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Compiling ArangoDB -================== - -Problem -------- - -You want to modify sources or add your own changes to ArangoDB. - -Solution --------- - -ArangoDB, as many other open source projects nowadays, is standing on the shoulder of giants. -This gives us a solid foundation to bring you a unique feature set, but it introduces a lot of -dependencies that need to be in place in order to compile ArangoDB. - -Since build infrastructures are very different depending on the target OS, choose your target -from the recipes below. - -- [Compile on Debian](Debian.md) - -- [Compile on Windows](Windows.md) - -- [Running Custom Build](RunningCustomBuild.md) - - - [Recompiling jemalloc](jemalloc.md) diff --git a/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md b/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md deleted file mode 100644 index 3cdb904013c0..000000000000 --- a/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md +++ /dev/null @@ -1,59 +0,0 @@ -Running a custom build -====================== - -Problem -------- - -You've already built a custom version of ArangoDB and want to run it. Possibly in isolation from an existing installation or you may want to re-use the data. - -Solution --------- - -First, you need to build your own version of ArangoDB. If you haven't done so -already, have a look at any of the [Compiling](README.md) recipes. - -This recipe assumes you're in the root directory of the ArangoDB distribution and compiling has successfully finished. - -### Running in isolation - -This part shows how to run your custom build with an empty database directory - -```bash -# create data directory -mkdir /tmp/arangodb - -# run -bin/arangod \ - --configuration etc/relative/arangod.conf\ - --database.directory /tmp/arangodb -``` - -### Running with data - -This part shows how to run your custom build with the config and data from a pre-existing stable installation. 
- -{% hint 'danger' %} -ArangoDB's developers may change the db file format and after running with a -changed file format, there may be no way back. Alternatively you can run your -build in isolation and [dump](../../Manual/Programs/Arangodump/index.html) and -[restore](../../Manual/Programs/Arangorestore/index.html) the data from the -stable to your custom build. -{% endhint %} - -When running like this, you must run the db as the arangod user (the default -installed by the package) in order to have write access to the log, database -directory etc. Running as root will likely mess up the file permissions - good -luck fixing that! - -```bash -# become root first -su - -# now switch to arangod and run -su - arangod -bin/arangod --configuration /etc/arangodb/arangod.conf -``` - -**Author:** [Patrick Huber](https://github.com/stackmagic) - -**Tags:** #build diff --git a/Documentation/Books/Cookbook/Compiling/Windows.md b/Documentation/Books/Cookbook/Compiling/Windows.md deleted file mode 100644 index 330e00d55a91..000000000000 --- a/Documentation/Books/Cookbook/Compiling/Windows.md +++ /dev/null @@ -1,197 +0,0 @@ -Compiling ArangoDB under Windows -================================ - -Problem -------- - -I want to compile ArangoDB 3.4 and onwards under Windows. - -**Note:** If you want to compile version 3.3 or earlier, then look at the -[Compiling ArangoDB under Windows](https://docs.arangodb.com/3.3/Cookbook/Compiling/Windows.html) -recipe in the 3.3 documentation. - -Solution --------- - -With ArangoDB 3.0 a complete cmake environment was introduced. This also streamlines the dependencies on Windows. -We suggest to use [chocolatey.org](https://chocolatey.org/) to install most of the dependencies. For sure -most projects offer their own setup & install packages, chocolatey offers a simplified way to install them -with less user interactions. You can even use chocolatey via -[ansibles 2.7 winrm facility](https://docs.ansible.com/ansible/latest/user_guide/windows.html) -to do unattended installations of some software on Windows. - -### Ingredients - -First install the choco package manager by pasting this tiny cmdlet into a command window -*(needs to be run with Administrator privileges; Right click start menu, **Command Prompt (Admin)**)*: - - @powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))" && SET PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin - -#### Visual Studio and its Compiler - -Since choco currently fails to alter the environment for -[Microsoft Visual Studio](https://www.visualstudio.com/en-us/products/visual-studio-community-vs.aspx), -we suggest to download and install Visual Studio by hand. -Currently Visual Studio 2017 is the only supported option. - -{% hint 'warning' %} -You need to make sure that it installs the **Desktop development with C++** preset, -else cmake will fail to detect it later on. Furthermore, the **Windows 8.1 SDK and UCRT SDK** -optional component is required to be selected during Visual Studio installation, else V8 -will fail to compile later on. -{% endhint %} - -After it successfully installed, start it once, so it can finish its setup. 
- -#### More Dependencies - -Now you can invoke the choco package manager for an unattended install of the dependencies -*(needs to be run with Administrator privileges again)*: - - choco install -y cmake.portable nsis python2 procdump windbg wget - -Then we need to install the [OpenSSL](https://openssl.org) library from its sources or using precompiled -[Third Party OpenSSL Related Binary Distributions](https://wiki.openssl.org/index.php/Binaries). - -#### Optional Dependencies - -If you intend to run the unit tests or compile from git, you also need -*(needs to be run with Administrator privileges again)*: - - choco install -y git winflexbison ruby - -Close and reopen the Administrator command window in order to continue with the ruby devkit: - - choco install -y ruby2.devkit - -And manually install the requirements via the `Gemfile` fetched from the ArangoDB Git repository -*(needs to be run with Administrator privileges)*: - - wget https://raw.githubusercontent.com/arangodb/arangodb/devel/tests/rb/HttpInterface/Gemfile - setx PATH %PATH%;C:\tools\DevKit2\bin;C:\tools\DevKit2\mingw\bin - gem install bundler - bundler - -Note that the V8 build scripts and gyp aren't compatible with Python 3.x hence you need python2! - -### Building ArangoDB - -Download and extract the release tarball from https://www.arangodb.com/download/ - -Or clone the GitHub repository and checkout the branch or tag you need (e.g. `devel`) - - git clone https://github.com/arangodb/arangodb.git -b devel - cd arangodb - -Generate the Visual studio project files, and check back that cmake discovered all components on your system: - - mkdir Build64 - cd Build64 - cmake -G "Visual Studio 15 2017 Win64" .. - -Note that in some cases cmake struggles to find the proper python interpreter -(i.e. the cygwin one won't work). You can force overrule it by appending: - - -DPYTHON_EXECUTABLE:FILEPATH=C:/Python27/python.exe - -You can now load these in the Visual Studio IDE or use cmake to start the build: - - cmake --build . --config RelWithDebInfo - -The binaries need the ICU datafile `icudt54l.dat`, which is automatically copied into the directory containing the -executable. - -### Unit tests (Optional) - -The unit tests require a [cygwin](https://www.cygwin.com/) environment. - -#### Cygwin Installation Hints - -You need at least `make` from cygwin. Cygwin also offers a `cmake`. Do **not** install the cygwin cmake. - -You should also issue these commands to generate user information for the cygwin commands: - - mkpasswd > /etc/passwd - mkgroup > /etc/group - -Turning ACL off (noacl) for all mounts in cygwin fixes permissions troubles that may appear in the build: - - # /etc/fstab - # - # This file is read once by the first process in a Cygwin process tree. - # To pick up changes, restart all Cygwin processes. For a description - # see https://cygwin.com/cygwin-ug-net/using.html#mount-table - - # noacl = Ignore Access Control List and let Windows handle permissions - C:/cygwin64/bin /usr/bin ntfs binary,auto,noacl 0 0 - C:/cygwin64/lib /usr/lib ntfs binary,auto,noacl 0 0 - C:/cygwin64 / ntfs override,binary,auto,noacl 0 0 - none /cygdrive cygdrive binary,posix=0,user,noacl 0 0 - -#### Enable native symlinks for Cygwin and git - -Cygwin will create proprietary files as placeholders by default instead of -actually symlinking files. The placeholders later tell Cygwin where to resolve -paths to. It does not intercept every access to the placeholders however, so -that 3rd party scripts break. 
Windows Vista and above support real symlinks, -and Cygwin can be configured to make use of it: - - # use actual symlinks to prevent documentation build errors - # (requires elevated rights!) - export CYGWIN="winsymlinks:native" - -Note that you must run Cygwin as administrator or change the Windows group -policies to allow user accounts to create symlinks (`gpedit.msc` if available). - -BTW: You can create symlinks manually on Windows like: - - mklink /H target/file.ext source/file.ext - mklink /D target/path source/path - mklink /J target/path source/path/for/junction - -And in Cygwin: - - ln -s source target - -#### Making the ICU database publically available - -If you intend to use the machine for development purposes, it may be more practical to copy it to a common place: - - cd 3rdParty/V8/v*/third_party/icu/source/data/in && cp icudt*.dat /cygdrive/c/Windows/ - -And configure your environment (yes this instruction remembers to the hitchhikers guide to the galaxy...) so that -`ICU_DATA` points to `c:\\Windows`. You do that by opening the explorer, -right click on `This PC` in the tree on the left, choose `Properties` in the opening window `Advanced system settings`, -in the Popup `Environment Variables`, another popup opens, in the `System Variables` part you click `New`, -And variable name: `ICU_DATA` to the value: `c:\\Windows` - -![HowtoSetEnv](../assets/CompilingUnderWindows/SetEnvironmentVar.png) - -#### Running Unit tests - -You can then run the integration tests in the cygwin shell like that: - - Build64/bin/RelWithDebInfo/arangosh.exe \ - -c etc/relative/arangosh.conf \ - --log.level warning \ - --server.endpoint tcp://127.0.0.1:1024 \ - --javascript.execute UnitTests/unittest.js \ - -- \ - all \ - --build Build64 \ - --buildType RelWithDebInfo \ - --skipNondeterministic true \ - --skipTimeCritical true \ - --skipBoost true \ - --skipGeo true - -Additional options `--ruby c:/tools/ruby25/bin/ruby` and `--rspec c:/tools/ruby25/bin/rspec` -should be used only if Ruby is not in the *PATH*. - -**Authors**: -[Frank Celler](https://github.com/fceller), -[Wilfried Goesgens](https://github.com/dothebart), -[Simran Brucherseifer](https://github.com/Simran-B) and -[Vadim Kondratyev](https://github.com/KVS85). - -**Tags**: #windows diff --git a/Documentation/Books/Cookbook/Compiling/jemalloc.md b/Documentation/Books/Cookbook/Compiling/jemalloc.md deleted file mode 100644 index dcec0774ec72..000000000000 --- a/Documentation/Books/Cookbook/Compiling/jemalloc.md +++ /dev/null @@ -1,40 +0,0 @@ -Jemalloc -======== - -**This article is only relevant if you intend to compile arangodb on Ubuntu 16.10 or debian testing** - -On more modern linux systems (development/floating at the time of this writing) you may get compile / link errors with arangodb regarding jemalloc. -This is due to compilers switching their default behaviour regarding the `PIC` - Position Independend Code. -It seems common that jemalloc remains in a stage where this change isn't followed and causes arangodb to error out during the linking phase. - -From now on cmake will detect this and give you this hint: - - the static system jemalloc isn't suitable! Recompile with the current compiler or disable using `-DCMAKE_CXX_FLAGS=-no-pie -DCMAKE_C_FLAGS=-no-pie` - -Now you've got three choices. - -Doing without jemalloc ----------------------- - -Fixes the compilation issue, but you will get problems with the glibcs heap fragmentation behaviour which in the longer run will lead to an ever increasing memory consumption of ArangoDB. 
- -So, while this may be suitable for development / testing systems, its definitely not for production. - -Disabling PIC altogether ------------------------- - -This will build an arangod which doesn't use this compiler feature. It may be not so nice for development builds. It can be achieved by specifying these options on cmake: - - -DCMAKE_CXX_FLAGS=-no-pie -DCMAKE_C_FLAGS=-no-pie - -Recompile jemalloc ------------------- - -The smartest way is to fix the jemalloc libraries packages on your system so its reflecting that new behaviour. On debian / ubuntu systems it can be achieved like this: - - apt-get install automake debhelper docbook-xsl xsltproc dpkg-dev - apt source jemalloc - cd jemalloc* - dpkg-buildpackage - cd .. - dpkg -i *jemalloc*deb diff --git a/Documentation/Books/Cookbook/DocumentInheritance.md b/Documentation/Books/Cookbook/DocumentInheritance.md deleted file mode 100644 index df56394726c2..000000000000 --- a/Documentation/Books/Cookbook/DocumentInheritance.md +++ /dev/null @@ -1,68 +0,0 @@ -# Model document inheritance - -## Problem - -How do you model document inheritance given that collections do not support that feature? - -## Solution - -Lets assume you have three document collections: "subclass", "class" and "superclass". You also have two edge collections: "sub_extends_class" and "class_extends_super". - -You can create them via arangosh or Foxx: - -```js -var graph_module = require("com/arangodb/general-graph"); -var g = graph_module._create("inheritance"); -g._extendEdgeDefinitions(graph_module. _directedRelation("sub_extends_class", ["subclass"], ["class"])); -g._extendEdgeDefinitions(graph_module. _directedRelation("class_extends_super", ["class"], ["superclass"])); -``` - -This makes sure when using the graph interface that the inheritance looks like: - -* sub → class -* class → super -* super → sub - -To make sure everything works as expected you should use the built-in traversal in combination with Foxx. This allows you to add the inheritance security layer easily. -To use traversals in Foxx simply add the following line before defining routes: - -```js -var traversal = require("org/arangodb/graph/traversal"); -var Traverser = traversal.Traverser; -``` - -Also you can add the following endpoint in Foxx: - -```js -var readerConfig = { - datasource: traversal.graphDatasourceFactory("inheritance"), - expander: traversal.outboundExpander, // Go upwards in the tree - visitor: function (config, result, vertex, path) { - for (key in vertex) { - if (vertex.hasOwnProperty(key) && !result.hasOwnProperty(key)) { - result[key] = vertex[key] // Store only attributes that have not yet been found - } - } - } -}; - -controller.get("load/:collection/:key", function(req, res) { - var result = {}; - var id = res.params("collection") + "/" + res.params("key"); - var traverser = new Traverser(readerConfig); - traverser.traverse(result, g.getVertex(id)); - res.json(result); -}); -``` - -This will make sure to iterate the complete inheritance tree upwards to the root element and will return all values on the path -were the first instance of this value is kept - -## Comment -You should go with edges because it is much easier to query them if you have a theoretically unlimited depth in inheritance. -If you have a fixed inheritance depth you could also go with an attribute in the document referencing the parent and execute joins in AQL. 
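For the fixed-depth alternative, a minimal sketch could look like this (run in arangosh; the `parent` attribute holding the parent document's `_id` and the collection/key names are hypothetical):

```js
// resolve one level of inheritance with an explicit AQL join:
// attributes of the subclass document override those of its parent
db._query(
  "FOR doc IN subclass " +
  "  FILTER doc._key == @key " +
  "  FOR parentDoc IN class " +
  "    FILTER parentDoc._id == doc.parent " +
  "    RETURN MERGE(parentDoc, doc)",
  { key: "someSubclassKey" }
).toArray();
```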
- - -**Author:** [Michael Hackstein](https://github.com/mchacki) - -**Tags:** #graph #document \ No newline at end of file diff --git a/Documentation/Books/Cookbook/FOOTER.html b/Documentation/Books/Cookbook/FOOTER.html deleted file mode 100644 index 239869bfaf6a..000000000000 --- a/Documentation/Books/Cookbook/FOOTER.html +++ /dev/null @@ -1 +0,0 @@ -© ArangoDB - the native multi-model NoSQL database \ No newline at end of file diff --git a/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md b/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md deleted file mode 100644 index f1a305293cac..000000000000 --- a/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md +++ /dev/null @@ -1,106 +0,0 @@ -Using a custom visitor from node.js -=================================== - -Problem -------- - -I want to traverse a graph using a custom visitor from node.js. - - -Solution --------- - -Use [arangojs](https://www.npmjs.com/package/arangojs) and an AQL query with a custom -visitor. - -### Installing arangojs - -First thing is to install *arangojs*. -This can be done using *npm* or *bower*: - -``` -npm install arangojs -``` - -or - -``` -bower install arangojs -``` - -### Example data setup - -For the following example, we need the example graph and data from -[here](https://jsteemann.github.io/downloads/code/world-graph-setup.js). -Please download the code from the link and store it in the filesystem using a filename -of `world-graph-setup.js`. Then start the ArangoShell and run the code from the file: - -```js -require("internal").load("/path/to/file/world-graph-setup.js"); -``` - -The script will create the following two collections and load some data into them: - -- `v`: a collection with vertex documents -- `e`: an edge collection containing the connections between vertices in `v` - -### Registering a custom visitor function - -Let's register a custom visitor function now. A custom visitor function is a JavaScript -function that is executed every time the traversal processes a vertex in the graph. 
- -To register a custom visitor function, we can execute the following commands in the -ArangoShell: - -```js -var aqlfunctions = require("org/arangodb/aql/functions"); - -aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) { - if (connected && connected.length === 0) { - return vertex.name + " (" + vertex.type + ")"; - } -}); -``` - -### Invoking the custom visitor - -The following code can be run in node.js to execute an AQL query that will -make use of the custom visitor: - -```js -Database = require('arangojs'); - -/* connection the database, change as required */ -db = new Database('http://127.0.0.1:8529'); - -/* the query string */ -var query = "FOR result IN TRAVERSAL(v, e, @vertex, 'inbound', @options) RETURN result"; - -/* bind parameters */ -var bindVars = { - vertex: "v/world", /* our start vertex */ - options: { - order: "preorder-expander", - visitor: "myfunctions::leafNodeVisitor", - visitorReturnsResults: true - } -}; - -db.query(query, bindVars, function (err, cursor) { - if (err) { - console.log('error: %j', err); - } else { - cursor.all(function(err2, list) { - if (err) { - console.log('error: %j', err2); - } else { - console.log("all document keys: %j", list); - } - }); - } -}); -``` - -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #graph #traversal #aql #nodejs diff --git a/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md b/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md deleted file mode 100644 index 53544a8176f6..000000000000 --- a/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md +++ /dev/null @@ -1,790 +0,0 @@ -AQL Example Queries on an Actors and Movies Database -==================================================== - -Acknowledgments ---------------- - -On [Stackoverflow][1] the user [Vincz][2] asked for some example queries based on graphs. -So credits for this questions go to him. The datasets and queries have been taken from the guys of [neo4j](http://neo4j.com/docs/stable/cypherdoc-movie-database.html). Credits and thanks to them. -As I also think this examples are yet missing I decided to write this recipe. - - -Problem -------- - -(Copy from Stackoverflow) -Given a collection of **actors** and a collection of **movies**. And a **actIn** edges collection (with a **year** property) to connect the vertex. - -\[Actor\] ← act in → \[Movie\] - - How could I get: - -* All actors who acted in "movie1" OR "movie2" -* All actors who acted in both "movie1" AND "movie2" ? -* All common movies between "actor1" and "actor2" ? -* All actors who acted in 3 or more movies ? -* All movies where exactly 6 actors acted in ? -* The number of actors by movie ? -* The number of movies by actor ? -* The number of movies acted in between 2005 and 2010 by actor ? - - -Solution --------- - -During this solution we will be using arangosh to create and query the data. -All the AQL queries are strings and can simply be copied over to your favorite driver instead of arangosh. 
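To illustrate that last remark, one of the query strings from this recipe could be run from node.js with arangojs roughly like this (a hedged sketch mirroring the callback style shown in the previous recipe; the connection details are placeholders):

```js
var Database = require('arangojs');

/* connect to the database, change as required */
var db = new Database('http://127.0.0.1:8529');

/* any AQL string from this recipe can be passed to the driver unchanged */
var query = "FOR x IN ANY 'movies/TheMatrix' actsIn " +
            "OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN x._id";

db.query(query, {}, function (err, cursor) {
  if (err) {
    console.log('error: %j', err);
  } else {
    cursor.all(function (err2, list) {
      console.log('actors: %j', list);
    });
  }
});
```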
- -Create a Test Dataset in arangosh: - -```js -var actors = db._create("actors"); -var movies = db._create("movies"); -var actsIn = db._createEdgeCollection("actsIn"); - -var TheMatrix = movies.save({_key: "TheMatrix", title:'The Matrix', released:1999, tagline:'Welcome to the Real World'})._id; -var Keanu = actors.save({_key: "Keanu", name:'Keanu Reeves', born:1964})._id; -var Carrie = actors.save({_key: "Carrie", name:'Carrie-Anne Moss', born:1967})._id; -var Laurence = actors.save({_key: "Laurence", name:'Laurence Fishburne', born:1961})._id; -var Hugo = actors.save({_key: "Hugo", name:'Hugo Weaving', born:1960})._id; -var Emil = actors.save({_key: "Emil", name:"Emil Eifrem", born: 1978}); - -actsIn.save(Keanu, TheMatrix, {roles: ["Neo"], year: 1999}); -actsIn.save(Carrie, TheMatrix, {roles: ["Trinity"], year: 1999}); -actsIn.save(Laurence, TheMatrix, {roles: ["Morpheus"], year: 1999}); -actsIn.save(Hugo, TheMatrix, {roles: ["Agent Smith"], year: 1999}); -actsIn.save(Emil, TheMatrix, {roles: ["Emil"], year: 1999}); - -var TheMatrixReloaded = movies.save({_key: "TheMatrixReloaded", title: "The Matrix Reloaded", released: 2003, tagline: "Free your mind"}); -actsIn.save(Keanu, TheMatrixReloaded, {roles: ["Neo"], year: 2003}); -actsIn.save(Carrie, TheMatrixReloaded, {roles: ["Trinity"], year: 2003}); -actsIn.save(Laurence, TheMatrixReloaded, {roles: ["Morpheus"], year: 2003}); -actsIn.save(Hugo, TheMatrixReloaded, {roles: ["Agent Smith"], year: 2003}); - -var TheMatrixRevolutions = movies.save({_key: "TheMatrixRevolutions", title: "The Matrix Revolutions", released: 2003, tagline: "Everything that has a beginning has an end"}); -actsIn.save(Keanu, TheMatrixRevolutions, {roles: ["Neo"], year: 2003}); -actsIn.save(Carrie, TheMatrixRevolutions, {roles: ["Trinity"], year: 2003}); -actsIn.save(Laurence, TheMatrixRevolutions, {roles: ["Morpheus"], year: 2003}); -actsIn.save(Hugo, TheMatrixRevolutions, {roles: ["Agent Smith"], year: 2003}); - -var TheDevilsAdvocate = movies.save({_key: "TheDevilsAdvocate", title:"The Devil's Advocate", released:1997, tagline:'Evil has its winning ways'})._id; -var Charlize = actors.save({_key: "Charlize", name:'Charlize Theron', born:1975})._id; -var Al = actors.save({_key: "Al", name:'Al Pacino', born:1940})._id; -actsIn.save(Keanu, TheDevilsAdvocate, {roles: ["Kevin Lomax"], year: 1997}); -actsIn.save(Charlize, TheDevilsAdvocate, {roles: ["Mary Ann Lomax"], year: 1997}); -actsIn.save(Al, TheDevilsAdvocate, {roles: ["John Milton"], year: 1997}); - -var AFewGoodMen = movies.save({_key: "AFewGoodMen", title:"A Few Good Men", released:1992, tagline:"In the heart of the nation's capital, in a courthouse of the U.S. government, one man will stop at nothing to keep his honor, and one will stop at nothing to find the truth."})._id; -var TomC = actors.save({_key: "TomC", name:'Tom Cruise', born:1962})._id; -var JackN = actors.save({_key: "JackN", name:'Jack Nicholson', born:1937})._id; -var DemiM = actors.save({_key: "DemiM", name:'Demi Moore', born:1962})._id; -var KevinB = actors.save({_key:"KevinB", name:'Kevin Bacon', born:1958})._id; -var KieferS = actors.save({_key:"KieferS", name:'Kiefer Sutherland', born:1966})._id; -var NoahW = actors.save({_key:"NoahW", name:'Noah Wyle', born:1971})._id; -var CubaG = actors.save({_key:"CubaG", name:'Cuba Gooding Jr.', born:1968})._id; -var KevinP = actors.save({_key:"KevinP", name:'Kevin Pollak', born:1957})._id; -var JTW = actors.save({_key:"JTW", name:'J.T. 
Walsh', born:1943})._id; -var JamesM = actors.save({_key:"JamesM", name:'James Marshall', born:1967})._id; -var ChristopherG = actors.save({_key:"ChristopherG", name:'Christopher Guest', born:1948})._id; -actsIn.save(TomC,AFewGoodMen,{roles:['Lt. Daniel Kaffee'], year: 1992}); -actsIn.save(JackN,AFewGoodMen,{roles:['Col. Nathan R. Jessup'], year: 1992}); -actsIn.save(DemiM,AFewGoodMen,{roles:['Lt. Cdr. JoAnne Galloway'], year: 1992}); -actsIn.save(KevinB,AFewGoodMen,{roles:['Capt. Jack Ross'], year: 1992}); -actsIn.save(KieferS,AFewGoodMen,{ roles:['Lt. Jonathan Kendrick'], year: 1992}); -actsIn.save(NoahW,AFewGoodMen,{roles:['Cpl. Jeffrey Barnes'], year: 1992}); -actsIn.save(CubaG,AFewGoodMen,{ roles:['Cpl. Carl Hammaker'], year: 1992}); -actsIn.save(KevinP,AFewGoodMen,{roles:['Lt. Sam Weinberg'], year: 1992}); -actsIn.save(JTW,AFewGoodMen,{roles:['Lt. Col. Matthew Andrew Markinson'], year: 1992}); -actsIn.save(JamesM,AFewGoodMen,{roles:['Pfc. Louden Downey'], year: 1992}); -actsIn.save(ChristopherG,AFewGoodMen,{ roles:['Dr. Stone'], year: 1992}); - -var TopGun = movies.save({_key:"TopGun", title:"Top Gun", released:1986, tagline:'I feel the need, the need for speed.'})._id; -var KellyM = actors.save({_key:"KellyM", name:'Kelly McGillis', born:1957})._id; -var ValK = actors.save({_key:"ValK", name:'Val Kilmer', born:1959})._id; -var AnthonyE = actors.save({_key:"AnthonyE", name:'Anthony Edwards', born:1962})._id; -var TomS = actors.save({_key:"TomS", name:'Tom Skerritt', born:1933})._id; -var MegR = actors.save({_key:"MegR", name:'Meg Ryan', born:1961})._id; -actsIn.save(TomC,TopGun,{roles:['Maverick'], year: 1986}); -actsIn.save(KellyM,TopGun,{roles:['Charlie'], year: 1986}); -actsIn.save(ValK,TopGun,{roles:['Iceman'], year: 1986}); -actsIn.save(AnthonyE,TopGun,{roles:['Goose'], year: 1986}); -actsIn.save(TomS,TopGun,{roles:['Viper'], year: 1986}); -actsIn.save(MegR,TopGun,{roles:['Carole'], year: 1986}); - -var JerryMaguire = movies.save({_key:"JerryMaguire", title:'Jerry Maguire', released:2000, tagline:'The rest of his life begins now.'})._id; -var ReneeZ = actors.save({_key:"ReneeZ", name:'Renee Zellweger', born:1969})._id; -var KellyP = actors.save({_key:"KellyP", name:'Kelly Preston', born:1962})._id; -var JerryO = actors.save({_key:"JerryO", name:"Jerry O'Connell", born:1974})._id; -var JayM = actors.save({_key:"JayM", name:'Jay Mohr', born:1970})._id; -var BonnieH = actors.save({_key:"BonnieH", name:'Bonnie Hunt', born:1961})._id; -var ReginaK = actors.save({_key:"ReginaK", name:'Regina King', born:1971})._id; -var JonathanL = actors.save({_key:"JonathanL", name:'Jonathan Lipnicki', born:1996})._id; -actsIn.save(TomC,JerryMaguire,{roles:['Jerry Maguire'], year: 2000}); -actsIn.save(CubaG,JerryMaguire,{roles:['Rod Tidwell'], year: 2000}); -actsIn.save(ReneeZ,JerryMaguire,{roles:['Dorothy Boyd'], year: 2000}); -actsIn.save(KellyP,JerryMaguire,{roles:['Avery Bishop'], year: 2000}); -actsIn.save(JerryO,JerryMaguire,{roles:['Frank Cushman'], year: 2000}); -actsIn.save(JayM,JerryMaguire,{roles:['Bob Sugar'], year: 2000}); -actsIn.save(BonnieH,JerryMaguire,{roles:['Laurel Boyd'], year: 2000}); -actsIn.save(ReginaK,JerryMaguire,{roles:['Marcee Tidwell'], year: 2000}); -actsIn.save(JonathanL,JerryMaguire,{roles:['Ray Boyd'], year: 2000}); - -var StandByMe = movies.save({_key:"StandByMe", title:"Stand By Me", released:1986, tagline:"For some, it's the last real taste of innocence, and the first real taste of life. 
But for everyone, it's the time that memories are made of."})._id; -var RiverP = actors.save({_key:"RiverP", name:'River Phoenix', born:1970})._id; -var CoreyF = actors.save({_key:"CoreyF", name:'Corey Feldman', born:1971})._id; -var WilW = actors.save({_key:"WilW", name:'Wil Wheaton', born:1972})._id; -var JohnC = actors.save({_key:"JohnC", name:'John Cusack', born:1966})._id; -var MarshallB = actors.save({_key:"MarshallB", name:'Marshall Bell', born:1942})._id; -actsIn.save(WilW,StandByMe,{roles:['Gordie Lachance'], year: 1986}); -actsIn.save(RiverP,StandByMe,{roles:['Chris Chambers'], year: 1986}); -actsIn.save(JerryO,StandByMe,{roles:['Vern Tessio'], year: 1986}); -actsIn.save(CoreyF,StandByMe,{roles:['Teddy Duchamp'], year: 1986}); -actsIn.save(JohnC,StandByMe,{roles:['Denny Lachance'], year: 1986}); -actsIn.save(KieferS,StandByMe,{roles:['Ace Merrill'], year: 1986}); -actsIn.save(MarshallB,StandByMe,{roles:['Mr. Lachance'], year: 1986}); - -var AsGoodAsItGets = movies.save({_key:"AsGoodAsItGets", title:'As Good as It Gets', released:1997, tagline:'A comedy from the heart that goes for the throat.'})._id; -var HelenH = actors.save({_key:"HelenH", name:'Helen Hunt', born:1963})._id; -var GregK = actors.save({_key:"GregK", name:'Greg Kinnear', born:1963})._id; -actsIn.save(JackN,AsGoodAsItGets,{roles:['Melvin Udall'], year: 1997}); -actsIn.save(HelenH,AsGoodAsItGets,{roles:['Carol Connelly'], year: 1997}); -actsIn.save(GregK,AsGoodAsItGets,{roles:['Simon Bishop'], year: 1997}); -actsIn.save(CubaG,AsGoodAsItGets,{roles:['Frank Sachs'], year: 1997}); - -var WhatDreamsMayCome = movies.save({_key:"WhatDreamsMayCome", title:'What Dreams May Come', released:1998, tagline:'After life there is more. The end is just the beginning.'})._id; -var AnnabellaS = actors.save({_key:"AnnabellaS", name:'Annabella Sciorra', born:1960})._id; -var MaxS = actors.save({_key:"MaxS", name:'Max von Sydow', born:1929})._id; -var WernerH = actors.save({_key:"WernerH", name:'Werner Herzog', born:1942})._id; -var Robin = actors.save({_key:"Robin", name:'Robin Williams', born:1951})._id; -actsIn.save(Robin,WhatDreamsMayCome,{roles:['Chris Nielsen'], year: 1998}); -actsIn.save(CubaG,WhatDreamsMayCome,{roles:['Albert Lewis'], year: 1998}); -actsIn.save(AnnabellaS,WhatDreamsMayCome,{roles:['Annie Collins-Nielsen'], year: 1998}); -actsIn.save(MaxS,WhatDreamsMayCome,{roles:['The Tracker'], year: 1998}); -actsIn.save(WernerH,WhatDreamsMayCome,{roles:['The Face'], year: 1998}); - -var SnowFallingonCedars = movies.save({_key:"SnowFallingonCedars", title:'Snow Falling on Cedars', released:1999, tagline:'First loves last. Forever.'})._id; -var EthanH = actors.save({_key:"EthanH", name:'Ethan Hawke', born:1970})._id; -var RickY = actors.save({_key:"RickY", name:'Rick Yune', born:1971})._id; -var JamesC = actors.save({_key:"JamesC", name:'James Cromwell', born:1940})._id; -actsIn.save(EthanH,SnowFallingonCedars,{roles:['Ishmael Chambers'], year: 1999}); -actsIn.save(RickY,SnowFallingonCedars,{roles:['Kazuo Miyamoto'], year: 1999}); -actsIn.save(MaxS,SnowFallingonCedars,{roles:['Nels Gudmundsson'], year: 1999}); -actsIn.save(JamesC,SnowFallingonCedars,{roles:['Judge Fielding'], year: 1999}); - -var YouveGotMail = movies.save({_key:"YouveGotMail", title:"You've Got Mail", released:1998, tagline:'At odds in life... 
in love on-line.'})._id; -var ParkerP = actors.save({_key:"ParkerP", name:'Parker Posey', born:1968})._id; -var DaveC = actors.save({_key:"DaveC", name:'Dave Chappelle', born:1973})._id; -var SteveZ = actors.save({_key:"SteveZ", name:'Steve Zahn', born:1967})._id; -var TomH = actors.save({_key:"TomH", name:'Tom Hanks', born:1956})._id; -actsIn.save(TomH,YouveGotMail,{roles:['Joe Fox'], year: 1998}); -actsIn.save(MegR,YouveGotMail,{roles:['Kathleen Kelly'], year: 1998}); -actsIn.save(GregK,YouveGotMail,{roles:['Frank Navasky'], year: 1998}); -actsIn.save(ParkerP,YouveGotMail,{roles:['Patricia Eden'], year: 1998}); -actsIn.save(DaveC,YouveGotMail,{roles:['Kevin Jackson'], year: 1998}); -actsIn.save(SteveZ,YouveGotMail,{roles:['George Pappas'], year: 1998}); - -var SleeplessInSeattle = movies.save({_key:"SleeplessInSeattle", title:'Sleepless in Seattle', released:1993, tagline:'What if someone you never met, someone you never saw, someone you never knew was the only someone for you?'})._id; -var RitaW = actors.save({_key:"RitaW", name:'Rita Wilson', born:1956})._id; -var BillPull = actors.save({_key:"BillPull", name:'Bill Pullman', born:1953})._id; -var VictorG = actors.save({_key:"VictorG", name:'Victor Garber', born:1949})._id; -var RosieO = actors.save({_key:"RosieO", name:"Rosie O'Donnell", born:1962})._id; -actsIn.save(TomH,SleeplessInSeattle,{roles:['Sam Baldwin'], year: 1993}); -actsIn.save(MegR,SleeplessInSeattle,{roles:['Annie Reed'], year: 1993}); -actsIn.save(RitaW,SleeplessInSeattle,{roles:['Suzy'], year: 1993}); -actsIn.save(BillPull,SleeplessInSeattle,{roles:['Walter'], year: 1993}); -actsIn.save(VictorG,SleeplessInSeattle,{roles:['Greg'], year: 1993}); -actsIn.save(RosieO,SleeplessInSeattle,{roles:['Becky'], year: 1993}); - -var JoeVersustheVolcano = movies.save({_key:"JoeVersustheVolcano", title:'Joe Versus the Volcano', released:1990, tagline:'A story of love, lava and burning desire.'})._id; -var Nathan = actors.save({_key:"Nathan", name:'Nathan Lane', born:1956})._id; -actsIn.save(TomH,JoeVersustheVolcano,{roles:['Joe Banks'], year: 1990}); -actsIn.save(MegR,JoeVersustheVolcano,{roles:['DeDe', 'Angelica Graynamore', 'Patricia Graynamore'], year: 1990}); -actsIn.save(Nathan,JoeVersustheVolcano,{roles:['Baw'], year: 1990}); - -var WhenHarryMetSally = movies.save({_key:"WhenHarryMetSally", title:'When Harry Met Sally', released:1998, tagline:'At odds in life... 
in love on-line.'})._id; -var BillyC = actors.save({_key:"BillyC", name:'Billy Crystal', born:1948})._id; -var CarrieF = actors.save({_key:"CarrieF", name:'Carrie Fisher', born:1956})._id; -var BrunoK = actors.save({_key:"BrunoK", name:'Bruno Kirby', born:1949})._id; -actsIn.save(BillyC,WhenHarryMetSally,{roles:['Harry Burns'], year: 1998}); -actsIn.save(MegR,WhenHarryMetSally,{roles:['Sally Albright'], year: 1998}); -actsIn.save(CarrieF,WhenHarryMetSally,{roles:['Marie'], year: 1998}); -actsIn.save(BrunoK,WhenHarryMetSally,{roles:['Jess'], year: 1998}); -``` - - -All actors who acted in "movie1" OR "movie2" --------------------------------------------- - -Say we want to find all actors who acted in "TheMatrix" OR "TheDevilsAdvocate": - -First lets try to get all actors for one movie: - -```js -db._query("FOR x IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN x._id").toArray(); -``` - -Result: -```json -[ - [ - "actors/Keanu", - "actors/Hugo", - "actors/Emil", - "actors/Carrie", - "actors/Laurence" - ] -] -``` - -Now we continue to form a UNION_DISTINCT of two NEIGHBORS queries which will be the solution: - -```js -db._query("FOR x IN UNION_DISTINCT ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray(); -``` - -```json -[ - [ - "actors/Emil", - "actors/Hugo", - "actors/Carrie", - "actors/Laurence", - "actors/Keanu", - "actors/Al", - "actors/Charlize" - ] -] -``` - - -All actors who acted in both "movie1" AND "movie2" ? ----------------------------------------------------- - -This is almost identical to the question above. -But this time we are not intrested in a UNION but in a INTERSECTION: - -```js -db._query("FOR x IN INTERSECTION ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray(); -``` - -```json -[ - [ - "actors/Keanu" - ] -] -``` - - -All common movies between "actor1" and "actor2" ? -------------------------------------------------- - -This is actually identical to the question about common actors in movie1 and movie2. -We just have to change the starting vertices. -As an example let's find all movies where Hugo Weaving ("Hugo") and Keanu Reeves are co-starring: - -```js -db._query("FOR x IN INTERSECTION ((FOR y IN ANY 'actors/Hugo' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'actors/Keanu' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray(); -``` - -```json -[ - [ - "movies/TheMatrixRevolutions", - "movies/TheMatrixReloaded", - "movies/TheMatrix" - ] -] -``` - - -All actors who acted in 3 or more movies ? ------------------------------------------- - -This question is different, we cannot make use of the neighbors function here. -Instead we will make use of the edge-index and the COLLECT statement of AQL for grouping. -The basic idea is to group all edges by their startVertex (which in this dataset is always the actor). -Then we remove all actors with less than 3 movies from the result. 
-As I am also interested in the number of movies an actor has acted in, I included the value in the result as well: - -```js -db._query("FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter FILTER counter >= 3 RETURN {actor: actor, movies: counter}").toArray() -``` - -```json -[ - { - "actor" : "actors/Carrie", - "movies" : 3 - }, - { - "actor" : "actors/CubaG", - "movies" : 4 - }, - { - "actor" : "actors/Hugo", - "movies" : 3 - }, - { - "actor" : "actors/Keanu", - "movies" : 4 - }, - { - "actor" : "actors/Laurence", - "movies" : 3 - }, - { - "actor" : "actors/MegR", - "movies" : 5 - }, - { - "actor" : "actors/TomC", - "movies" : 3 - }, - { - "actor" : "actors/TomH", - "movies" : 3 - } -] -``` - - -All movies where exactly 6 actors acted in ? --------------------------------------------- - -The same idea as in the query before, but with equality filter, however now we need the movie instead of the actor, so we return the _to attribute: - -```js -db._query("FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter FILTER counter == 6 RETURN movie").toArray() -``` - -```json -[ - "movies/SleeplessInSeattle", - "movies/TopGun", - "movies/YouveGotMail" -] -``` - - -The number of actors by movie ? -------------------------------- - -We remember in our dataset _to on the edge corresponds to the movie, so we count how often the same _to appears. -This is the number of actors. -The query is almost identical to the ones before but without the FILTER after COLLECT: - -```js -db._query("FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter RETURN {movie: movie, actors: counter}").toArray() -``` - -```json -[ - { - "movie" : "movies/AFewGoodMen", - "actors" : 11 - }, - { - "movie" : "movies/AsGoodAsItGets", - "actors" : 4 - }, - { - "movie" : "movies/JerryMaguire", - "actors" : 9 - }, - { - "movie" : "movies/JoeVersustheVolcano", - "actors" : 3 - }, - { - "movie" : "movies/SleeplessInSeattle", - "actors" : 6 - }, - { - "movie" : "movies/SnowFallingonCedars", - "actors" : 4 - }, - { - "movie" : "movies/StandByMe", - "actors" : 7 - }, - { - "movie" : "movies/TheDevilsAdvocate", - "actors" : 3 - }, - { - "movie" : "movies/TheMatrix", - "actors" : 5 - }, - { - "movie" : "movies/TheMatrixReloaded", - "actors" : 4 - }, - { - "movie" : "movies/TheMatrixRevolutions", - "actors" : 4 - }, - { - "movie" : "movies/TopGun", - "actors" : 6 - }, - { - "movie" : "movies/WhatDreamsMayCome", - "actors" : 5 - }, - { - "movie" : "movies/WhenHarryMetSally", - "actors" : 4 - }, - { - "movie" : "movies/YouveGotMail", - "actors" : 6 - } -] -``` - - -The number of movies by actor ? 
-------------------------------- - -I think you get the picture by now ;) - -```js -db._query("FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}").toArray() -``` - -```json -[ - { - "actor" : "actors/Al", - "movies" : 1 - }, - { - "actor" : "actors/AnnabellaS", - "movies" : 1 - }, - { - "actor" : "actors/AnthonyE", - "movies" : 1 - }, - { - "actor" : "actors/BillPull", - "movies" : 1 - }, - { - "actor" : "actors/BillyC", - "movies" : 1 - }, - { - "actor" : "actors/BonnieH", - "movies" : 1 - }, - { - "actor" : "actors/BrunoK", - "movies" : 1 - }, - { - "actor" : "actors/Carrie", - "movies" : 3 - }, - { - "actor" : "actors/CarrieF", - "movies" : 1 - }, - { - "actor" : "actors/Charlize", - "movies" : 1 - }, - { - "actor" : "actors/ChristopherG", - "movies" : 1 - }, - { - "actor" : "actors/CoreyF", - "movies" : 1 - }, - { - "actor" : "actors/CubaG", - "movies" : 4 - }, - { - "actor" : "actors/DaveC", - "movies" : 1 - }, - { - "actor" : "actors/DemiM", - "movies" : 1 - }, - { - "actor" : "actors/Emil", - "movies" : 1 - }, - { - "actor" : "actors/EthanH", - "movies" : 1 - }, - { - "actor" : "actors/GregK", - "movies" : 2 - }, - { - "actor" : "actors/HelenH", - "movies" : 1 - }, - { - "actor" : "actors/Hugo", - "movies" : 3 - }, - { - "actor" : "actors/JackN", - "movies" : 2 - }, - { - "actor" : "actors/JamesC", - "movies" : 1 - }, - { - "actor" : "actors/JamesM", - "movies" : 1 - }, - { - "actor" : "actors/JayM", - "movies" : 1 - }, - { - "actor" : "actors/JerryO", - "movies" : 2 - }, - { - "actor" : "actors/JohnC", - "movies" : 1 - }, - { - "actor" : "actors/JonathanL", - "movies" : 1 - }, - { - "actor" : "actors/JTW", - "movies" : 1 - }, - { - "actor" : "actors/Keanu", - "movies" : 4 - }, - { - "actor" : "actors/KellyM", - "movies" : 1 - }, - { - "actor" : "actors/KellyP", - "movies" : 1 - }, - { - "actor" : "actors/KevinB", - "movies" : 1 - }, - { - "actor" : "actors/KevinP", - "movies" : 1 - }, - { - "actor" : "actors/KieferS", - "movies" : 2 - }, - { - "actor" : "actors/Laurence", - "movies" : 3 - }, - { - "actor" : "actors/MarshallB", - "movies" : 1 - }, - { - "actor" : "actors/MaxS", - "movies" : 2 - }, - { - "actor" : "actors/MegR", - "movies" : 5 - }, - { - "actor" : "actors/Nathan", - "movies" : 1 - }, - { - "actor" : "actors/NoahW", - "movies" : 1 - }, - { - "actor" : "actors/ParkerP", - "movies" : 1 - }, - { - "actor" : "actors/ReginaK", - "movies" : 1 - }, - { - "actor" : "actors/ReneeZ", - "movies" : 1 - }, - { - "actor" : "actors/RickY", - "movies" : 1 - }, - { - "actor" : "actors/RitaW", - "movies" : 1 - }, - { - "actor" : "actors/RiverP", - "movies" : 1 - }, - { - "actor" : "actors/Robin", - "movies" : 1 - }, - { - "actor" : "actors/RosieO", - "movies" : 1 - }, - { - "actor" : "actors/SteveZ", - "movies" : 1 - }, - { - "actor" : "actors/TomC", - "movies" : 3 - }, - { - "actor" : "actors/TomH", - "movies" : 3 - }, - { - "actor" : "actors/TomS", - "movies" : 1 - }, - { - "actor" : "actors/ValK", - "movies" : 1 - }, - { - "actor" : "actors/VictorG", - "movies" : 1 - }, - { - "actor" : "actors/WernerH", - "movies" : 1 - }, - { - "actor" : "actors/WilW", - "movies" : 1 - } -] -``` - - -The number of movies acted in between 2005 and 2010 by actor ? --------------------------------------------------------------- - -This query is where a Multi Model database actually shines. -First of all we want to use it in production, so we set a skiplistindex on year. -This allows as to execute fast range queries like between 2005 and 2010. 
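The index is created below with the classic `ensureSkiplist()` shorthand from the 2.x API. On an ArangoDB 3.x server the generic `ensureIndex()` call should achieve the same; a minimal sketch, assuming a 3.x arangosh session:

```js
// sketch for ArangoDB 3.x: a sorted skiplist index on the edge attribute "year"
// (the 2.x shorthand shown below does the same thing)
db.actsIn.ensureIndex({ type: "skiplist", fields: ["year"] });
```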
- -```js -db.actsIn.ensureSkiplist("year") -``` - -Now we slightly modify our movies by actor query. -However my dataset contains only older movies, so I changed the year range from 1990 - 1995: - -```js -db._query("FOR x IN actsIn FILTER x.year >= 1990 && x.year <= 1995 COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}").toArray() -``` - -```json -[ - { - "actor" : "actors/BillPull", - "movies" : 1 - }, - { - "actor" : "actors/ChristopherG", - "movies" : 1 - }, - { - "actor" : "actors/CubaG", - "movies" : 1 - }, - { - "actor" : "actors/DemiM", - "movies" : 1 - }, - { - "actor" : "actors/JackN", - "movies" : 1 - }, - { - "actor" : "actors/JamesM", - "movies" : 1 - }, - { - "actor" : "actors/JTW", - "movies" : 1 - }, - { - "actor" : "actors/KevinB", - "movies" : 1 - }, - { - "actor" : "actors/KevinP", - "movies" : 1 - }, - { - "actor" : "actors/KieferS", - "movies" : 1 - }, - { - "actor" : "actors/MegR", - "movies" : 2 - }, - { - "actor" : "actors/Nathan", - "movies" : 1 - }, - { - "actor" : "actors/NoahW", - "movies" : 1 - }, - { - "actor" : "actors/RitaW", - "movies" : 1 - }, - { - "actor" : "actors/RosieO", - "movies" : 1 - }, - { - "actor" : "actors/TomC", - "movies" : 1 - }, - { - "actor" : "actors/TomH", - "movies" : 2 - }, - { - "actor" : "actors/VictorG", - "movies" : 1 - } -] -``` - - -Comment -------- - -**Author:** [Michael Hackstein](https://github.com/mchacki) - -**Tags:** #graph #examples - -[1]: http://stackoverflow.com/questions/32729314/aql-graph-queries-examples -[2]: http://stackoverflow.com/users/1126414/vincz diff --git a/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md b/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md deleted file mode 100644 index a75d27ed8e14..000000000000 --- a/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md +++ /dev/null @@ -1,81 +0,0 @@ -Fulldepth Graph-Traversal -========================= - -Problem -------- - -Lets assume you have a database and some edges and vertices. Now you need the node with the most connections in fulldepth. - -Solution --------- - -You need a custom traversal with the following properties: - -- Store all vertices you have visited already -- If you visit an already visited vertex return the connections + 1 and do not touch the edges -- If you visit a fresh vertex visit all its children and sum up their connections. Store this sum and return it + 1 -- Repeat for all vertices. - -```js -var traversal = require("org/arangodb/graph/traversal"); - -var knownFilter = function(config, vertex, path) { - if (config.known[vertex._key] !== undefined) { - return "prune"; - } - return ""; -}; - -var sumVisitor = function(config, result, vertex, path) { - if (config.known[vertex._key] !== undefined) { - result.sum += config.known[vertex._key]; - } else { - config.known[vertex._key] = result.sum; - } - result.sum += 1; - return; -}; - -var config = { - datasource: traversal.collectionDatasourceFactory(db.e), // e is my edge collection - strategy: "depthfirst", - order: "preorder", - filter: knownFilter, - expander: traversal.outboundExpander, - visitor: sumVisitor, - known: {} -}; - -var traverser = new traversal.Traverser(config); -var cursor = db.v.all(); // v is my vertex collection -while(cursor.hasNext()) { - var node = cursor.next(); - traverser.traverse({sum: 0}, node); -} - -config.known; // Returns the result of type name: counter. 
In arangosh this will print out complete result -``` - -To execute this script accordingly replace db.v and db.e with your collections -(v is vertices, e is edges) and write it to a file, e.g. traverse.js, -then execute it in arangosh: - -``` -cat traverse.js | arangosh -``` - -If you want to use it in production you should have a look at the Foxx framework which allows -you to store and execute this script on server side and make it accessible via your own API: -[Foxx](../../Manual/Foxx/index.html) - - -Comment -------- - -You only compute the connections of one vertex once and cache it then. -Complexity is almost equal to the amount of edges. -In the code below config.known contains the result of all vertices, you then can add the sorting on it. - -**Author:** [Michael Hackstein](https://github.com/mchacki) - -**Tags:** #graph diff --git a/Documentation/Books/Cookbook/Graph/README.md b/Documentation/Books/Cookbook/Graph/README.md deleted file mode 100644 index 9204e2d9b872..000000000000 --- a/Documentation/Books/Cookbook/Graph/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Graph -===== - -- [Fulldepth Graph-Traversal](FulldepthTraversal.md) - -- [Using a custom Visitor](CustomVisitorFromNodeJs.md) - -- [Example AQL Queries for Graphs](ExampleActorsAndMovies.md) diff --git a/Documentation/Books/Cookbook/Monitoring/Collectd.md b/Documentation/Books/Cookbook/Monitoring/Collectd.md deleted file mode 100644 index 778d14dab730..000000000000 --- a/Documentation/Books/Cookbook/Monitoring/Collectd.md +++ /dev/null @@ -1,351 +0,0 @@ -Monitoring ArangoDB using collectd -================================== - -Problem -------- - -The ArangoDB web interface shows a nice summary of the current state. I want to see similar numbers in my monitoring system so I can analyze the system usage post mortem or send alarms on failure. - -Solution --------- - -[Collectd](http://collectd.org) is an excellent tool to gather all kinds of metrics from a system, -and deliver it to a central monitoring like [Graphite](http://graphite.wikidot.com/screen-shots) -and / or [Nagios](http://www.nagios.org/). - -### Ingredients - -For this recipe you need to install the following tools: - -- [collectd >= 5.4.2](https://collectd.org/) The aggregation Daemon -- [kcollectd](https://www.forwiss.uni-passau.de/~berberic/Linux/kcollectd.html) for inspecting the data - -### Configuring collectd - -For aggregating the values we will use the [cURL-JSON plug-in](https://collectd.org/wiki/index.php/Plugin:cURL-JSON). -We will store the values using the [Round-Robin-Database writer](https://collectd.org/wiki/index.php/RRD)(RRD) which `kcollectd` can later on present to you. - -We assume your `collectd` comes from your distribution and reads its config from `/etc/collectd/collectd.conf`. Since this file tends to become pretty unreadable quickly, we use the `include` mechanism: - - - Filter "*.conf" - - -This way we can make each metric group on compact set config files. It consists of three components: - -* loading the plug-in -* adding metrics to the TypesDB -* the configuration for the plug-in itself - -### rrdtool - -We will use the [Round-Robin-Database](http://oss.oetiker.ch/rrdtool/) as storage backend for now. It creates its own database files of fixed size for each specific time range. Later you may choose more advanced writer-plug-ins, which may do network distribution of your metrics or integrate the above mentioned Graphite or your already established monitoring, etc. 
- -For the RRD we will go pretty much with defaults: - - # Load the plug-in: - LoadPlugin rrdtool - - DataDir "/var/lib/collectd/rrd" - # CacheTimeout 120 - # CacheFlush 900 - # WritesPerSecond 30 - # CreateFilesAsync false - # RandomTimeout 0 - # - # The following settings are rather advanced - # and should usually not be touched: - # StepSize 10 - # HeartBeat 20 - # RRARows 1200 - # RRATimespan 158112000 - # XFF 0.1 - - -### cURL JSON - -`Collectd` comes with a wide range of metric aggregation plug-ins. -Many tools today use [JSON](http://json.org) as data formatting grammar; so does ArangoDB. - -Therefore a plug-in offering to fetch JSON documents via HTTP is the perfect match to query ArangoDBs [administrative Statistics interface](../../HTTP/AdministrationAndMonitoring/index.html#read-the-statistics): - - # Load the plug-in: - LoadPlugin curl_json - # we need to use our own types to generate individual names for our gauges: - # TypesDB "/etc/collectd/arangodb_types.db" - - # Adjust the URL so collectd can reach your arangod: - - # Set your authentication to Aardvark here: - User "root" - # Password "bar" - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - Type "gauge" - - - - Type "client_totalTime_count" - - - Type "client_totalTime_sum" - - - Type "client_totalTime_counts0" - - - - Type "client_bytesReceived_count" - - - Type "client_bytesReceived_sum" - - - Type "client_bytesReceived_counts0" - - - - Type "client_requestTime_count" - - - Type "client_requestTime_sum" - - - Type "client_requestTime_counts0" - - - - Type "client_connectionTime_count" - - - Type "client_connectionTime_sum" - - - Type "client_connectionTime_counts0" - - - - Type "client_queueTime_count" - - - Type "client_queueTime_sum" - - - Type "client_queueTime_counts0" - - - - Type "client_bytesSent_count" - - - Type "client_bytesSent_sum" - - - Type "client_bytesSent_counts0" - - - - Type "client_ioTime_count" - - - Type "client_ioTime_sum" - - - Type "client_ioTime_counts0" - - - - Type "gauge" - - - - -To circumvent the shortcoming of the curl_JSON plug-in to only take the last path element as name for the metric, we need to give them a name using our own `types.db` file in `/etc/collectd/arangodb_types.db`: - - client_totalTime_count value:GAUGE:0:9223372036854775807 - client_totalTime_sum value:GAUGE:U:U - client_totalTime_counts0 value:GAUGE:U:U - - client_bytesReceived_count value:GAUGE:0:9223372036854775807 - client_bytesReceived_sum value:GAUGE:U:U - client_bytesReceived_counts0 value:GAUGE:U:U - - client_requestTime_count value:GAUGE:0:9223372036854775807 - client_requestTime_sum value:GAUGE:U:U - client_requestTime_counts0 value:GAUGE:U:U - - client_connectionTime_count value:GAUGE:0:9223372036854775807 - client_connectionTime_sum value:GAUGE:U:U - client_connectionTime_counts0 value:GAUGE:U:U - - client_queueTime_count value:GAUGE:0:9223372036854775807 - client_queueTime_sum value:GAUGE:U:U - client_queueTime_counts0 value:GAUGE:U:U - - client_bytesSent_count value:GAUGE:0:9223372036854775807 - client_bytesSent_sum 
value:GAUGE:U:U - client_bytesSent_counts0 value:GAUGE:U:U - - client_ioTime_count value:GAUGE:0:9223372036854775807 - client_ioTime_sum value:GAUGE:U:U - client_ioTime_counts0 value:GAUGE:U:U - -Please note that you probably need to uncomment this line from the main collectd.conf: - - # TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db" - -in order to make it still load its main types definition file. - -### Rolling your own - -You may want to monitor your own metrics from ArangoDB. Here is a simple example how to use the `config`: - - { - "testArray":[1,2], - "testArrayInbetween":[{"blarg":3},{"blub":4}], - "testDirectHit":5, - "testSubLevelHit":{"oneMoreLevel":6} - } - -This `config` snippet will parse the JSON above: - - - Type "gauge" - # Expect: 1 - - - Type "gauge" - # Expect: 2 - - - Type "gauge" - # Expect: 3 - - - Type "gauge" - # Expect: 4 - - - Type "gauge" - # Expect: 5 - - - Type "gauge" - # Expect: 6 - - # Adjust the URL so collectd can reach your arangod: - - # Set your authentication to Aardvark here: - # User "foo" - # Password "bar" - - Type "the_values" - - - Type "first_values" - - - Type "second_values" - - - Type "third_values" - - - Type "fourth_values" - - - Type "fifth_values" - - - -``` - -To get nice metric names, we specify our own `types.db` file in `/etc/collectd/collectd.conf.d/foxx_simple_types.db`: - -``` -the_values value:GAUGE:U:U -first_values value:GAUGE:U:U -second_values value:GAUGE:U:U -third_values value:GAUGE:U:U -fourth_values value:GAUGE:U:U -fifth_values value:GAUGE:U:U -``` - -**Author:** [Wilfried Goesgens](https://github.com/dothebart) - -**Tags:** #monitoring #foxx #json diff --git a/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md b/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md deleted file mode 100644 index 806d57685d09..000000000000 --- a/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md +++ /dev/null @@ -1,137 +0,0 @@ -Monitoring other relevant metrics of ArangoDB -============================================= - -Problem -------- - -Aside of the values which ArangoDB already offers for monitoring, other system metrics may be relevant for continuously operating ArangoDB. be it a single instance or a cluster setup. [Collectd offers a pleathora of plugins](https://collectd.org/wiki/index.php/Table_of_Plugins) - lets have a look at some of them which may be useful for us. - -Solution --------- - -### Ingedients - -For this recipe you need to install the following tools: - -- [collectd](https://collectd.org/): The metrics aggregation Daemon -- we base on [Monitoring with Collecd recipe](Collectd.md) for understanding the basics about collectd - -### Disk usage -You may want to monitor that ArangoDB doesn't run out of disk space. The [df Plugin](https://collectd.org/wiki/index.php/Plugin:DF) can aggregate these values for you. - -First we need to find out which disks are used by your ArangoDB. By default you need to find **/var/lib/arango** in the mount points. 
Since nowadays many virtual file systems are also mounted on a typical \*nix system we want to sort the output of mount: - - mount | sort - /dev/sda3 on /local/home type ext4 (rw,relatime,data=ordered) - /dev/sda4 on / type ext4 (rw,relatime,data=ordered) - /dev/sdb1 on /mnt type vfat (rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=utf8,shortname=mixed,errors=remount-ro) - binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime) - cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) - .... - udev on /dev type devtmpfs (rw,relatime,size=10240k,nr_inodes=1022123,mode=755) - -So here we can see the mount points are `/`, `/local/home`, `/mnt/` so `/var/lib/` can be found on the root partition (`/`) `/dev/sda3` here. A production setup may be different so the OS doesn't interfere with the services. - -The collectd configuration `/etc/collectd/collectd.conf.d/diskusage.conf` looks like this: - - LoadPlugin df - - Device "/dev/sda3" - # Device "192.168.0.2:/mnt/nfs" - # MountPoint "/home" - # FSType "ext4" - # ignore rootfs; else, the root file-system would appear twice, causing - # one of the updates to fail and spam the log - FSType rootfs - # ignore the usual virtual / temporary file-systems - FSType sysfs - FSType proc - FSType devtmpfs - FSType devpts - FSType tmpfs - FSType fusectl - FSType cgroup - IgnoreSelected true - # ReportByDevice false - # ReportReserved false - # ReportInodes false - # ValuesAbsolute true - # ValuesPercentage false - - -### Disk I/O Usage - -Another interesting metric is the amount of data read/written to disk - its an estimate how busy your ArangoDB or the whole system currently is. -The [Disk plugin](https://collectd.org/wiki/index.php/Plugin:Disk) aggregates these values. - -According to the mount points above our configuration `/etc/collectd/collectd.conf.d/disk_io.conf` looks like this: - - LoadPlugin disk - - Disk "hda" - Disk "/sda[23]/" - IgnoreSelected false - - - -### CPU Usage - -While the ArangoDB self monitoring already offers some overview of the running threads etc. you can get a deeper view using the [Process Plugin](https://collectd.org/wiki/index.php/Plugin:Processes). 
- -If you're running a single Arango instance, a simple match by process name is sufficient, `/etc/collectd/collectd.conf.d/arango_process.conf` looks like this: - - LoadPlugin processes - - Process "arangod" - - -If you're running a cluster, you can match the specific instances by command-line parameters, `/etc/collectd/collectd.conf.d/arango_cluster.conf` looks like this: - - LoadPlugin processes - - ProcessMatch "Claus" "/usr/bin/arangod .*--cluster.my-address *:8530" - ProcessMatch "Pavel" "/usr/bin/arangod .*--cluster.my-address *:8629" - ProcessMatch "Perry" "/usr/bin/arangod .*--cluster.my-address *:8630" - Process "etcd-arango" - - -### More Plugins - -As mentioned above, the list of available plugins is huge; Here are some more one could be interested in: -- use the [CPU Plugin](https://collectd.org/wiki/index.php/CPU) to monitor the overall CPU utilization -- use the [Memory Plugin](https://collectd.org/wiki/index.php/Plugin:Memory) to monitor main memory availability -- use the [Swap Plugin](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#plugin_swap) - to see whether excess RAM usage forces the system to page and thus slow down -- [Ethernet Statistics](https://collectd.org/wiki/index.php/Plugin:Ethstat) - with whats going on at your Network cards to get a more broad overview of network traffic -- you may [Tail logfiles](https://collectd.org/wiki/index.php/Plugin:Tail) - like an apache request log and pick specific requests by regular expressions -- [Parse tabular files](https://collectd.org/wiki/index.php/Plugin:Table) in the `/proc` file system -- you can use [filters](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#filter_configuration) - to reduce the amount of data created by plugins (i.e. if you have many CPU cores, you may want the combined result). - It can also decide where to route data and to which writer plugin -- while you may have seen that metrics are stored at a fixed rate or frequency, - your metrics (i.e. the durations of web requests) may come in a random & higher frequency. - Thus you want to burn them down to a fixed frequency, and know Min/Max/Average/Median. - So you want to [Aggregate values using the statsd pattern](https://collectd.org/wiki/index.php/Plugin:StatsD). -- You may start rolling your own in [Python](https://collectd.org/wiki/index.php/Plugin:Python), - [java](https://collectd.org/wiki/index.php/Plugin:Java), - [Perl](https://collectd.org/wiki/index.php/Plugin:Perl) or for sure in - [C](https://collectd.org/wiki/index.php/Plugin_architecture), the language collectd is implemented in - -Finally while kcollectd is nice to get a quick success at inspecting your collected metrics during working your way into collectd, -its not as sufficient for operating a production site. Since collectds default storage RRD is already widespread in system monitoring, -there are [many webfrontents](https://collectd.org/wiki/index.php/List_of_front-ends) to choose for the visualization. -Some of them replace the RRD storage by simply adding a writer plugin, -most prominent the [Graphite graphing framework](http://graphite.wikidot.com/screen-shots) with the -[Graphite writer](https://collectd.org/wiki/index.php/Plugin:Write_Graphite) which allows you to combine random metrics in single graphs -- to find coincidences in your data [you never dreamed of](http://metrics20.org/media/). 
- -If you already run [Nagios](http://www.nagios.org) you can use the -[Nagios tool](https://collectd.org/documentation/manpages/collectd-nagios.1.shtml) to submit values. - -We hope you now have a good overview of whats possible, but as usual its a good idea to browse the [Fine Manual](https://collectd.org/documentation.shtml). - -**Author:** [Wilfried Goesgens](https://github.com/dothebart) - -**Tags:** #monitoring diff --git a/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md b/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md deleted file mode 100644 index 264a4e3f565e..000000000000 --- a/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md +++ /dev/null @@ -1,132 +0,0 @@ -Monitoring replication slave -============================ - -**Note**: this recipe is working with ArangoDB 2.5, you need a collectd curl_json plugin with correct boolean type mapping. - -Problem -------- - -How to monitor the slave status using the `collectd curl_JSON` plugin. - -Solution --------- - -Since arangodb [reports the replication status in JSON](../../HTTP/Replications/ReplicationApplier.html#state-of-the-replication-applier), -integrating it with the [collectd curl_JSON plugin](Collectd.md) -should be an easy exercise. However, only very recent versions of collectd will handle boolean flags correctly. - -Our test master/slave setup runs with the the master listening on `tcp://127.0.0.1:8529` and the slave (which we query) listening on `tcp://127.0.0.1:8530`. -They replicate a dabatase by the name `testDatabase`. - -Since replication appliers are active per database and our example doesn't use the default `_system`, we need to specify its name in the URL like this: `_db/testDatabase`. - -We need to parse a document from a request like this: - - curl --dump - http://localhost:8530/_db/testDatabase/_api/replication/applier-state - -If the replication is not running the document will look like that: - -```javascript -{ - "state": { - "running": false, - "lastAppliedContinuousTick": null, - "lastProcessedContinuousTick": null, - "lastAvailableContinuousTick": null, - "safeResumeTick": null, - "progress": { - "time": "2015-11-02T13:24:07Z", - "message": "applier shut down", - "failedConnects": 0 - }, - "totalRequests": 1, - "totalFailedConnects": 0, - "totalEvents": 0, - "totalOperationsExcluded": 0, - "lastError": { - "time": "2015-11-02T13:24:07Z", - "errorMessage": "no start tick", - "errorNum": 1413 - }, - "time": "2015-11-02T13:31:53Z" - }, - "server": { - "version": "2.7.0", - "serverId": "175584498800385" - }, - "endpoint": "tcp://127.0.0.1:8529", - "database": "testDatabase" -} -``` - -A running replication will return something like this: - -```javascript -{ - "state": { - "running": true, - "lastAppliedContinuousTick": "1150610894145", - "lastProcessedContinuousTick": "1150610894145", - "lastAvailableContinuousTick": "1151639153985", - "safeResumeTick": "1150610894145", - "progress": { - "time": "2015-11-02T13:49:56Z", - "message": "fetching master log from tick 1150610894145", - "failedConnects": 0 - }, - "totalRequests": 12, - "totalFailedConnects": 0, - "totalEvents": 2, - "totalOperationsExcluded": 0, - "lastError": { - "errorNum": 0 - }, - "time": "2015-11-02T13:49:57Z" - }, - "server": { - "version": "2.7.0", - "serverId": "175584498800385" - }, - "endpoint": "tcp://127.0.0.1:8529", - "database": "testDatabase" -} -``` - -We create a simple collectd configuration in `/etc/collectd/collectd.conf.d/slave_testDatabase.conf` that matches our API: - -```javascript -TypesDB 
"/etc/collectd/collectd.conf.d/slavestate_types.db" - - # Adjust the URL so collectd can reach your arangod slave instance: - - # Set your authentication to that database here: - # User "foo" - # Password "bar" - - Type "boolean" - - - Type "counter" - - - Type "counter" - - - Type "counter" - - - -``` - -To get nice metric names, we specify our own `types.db` file in `/etc/collectd/collectd.conf.d/slavestate_types.db`: - -``` -boolean value:ABSOLUTE:0:1 -``` - -So, basically `state/running` will give you `0`/`1` if its (not / ) running through the collectd monitor. - - -**Author:** [Wilfried Goesgens](https://github.com/dothebart) - -**Tags:** #monitoring #foxx #json diff --git a/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md b/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md deleted file mode 100644 index 7b601f70e931..000000000000 --- a/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md +++ /dev/null @@ -1,206 +0,0 @@ -Monitoring ArangoDB Cluster network usage -========================================= - -Problem -------- - -We run a cluster and want to know whether the traffic is unbalanced or something like that. We want a cheap estimate which host has how much traffic. - -Solution --------- - -As we already run [Collectd](http://collectd.org) as our metric-hub, we want to utilize it to also give us these figures. A very cheap way to generate these values are the counters in the IPTables firewall of our system. - -### Ingredients - -For this recipe you need to install the following tools: - -- [collectd](https://collectd.org/): the aggregation Daemon -- [kcollectd](https://www.forwiss.uni-passau.de/~berberic/Linux/kcollectd.html) for inspecting the data -- [iptables](http://en.wikipedia.org/wiki/Iptables) - should come with your Linux distribution -- [ferm](http://ferm.foo-projects.org/download/2.2/ferm.html#basic_iptables_match_keywords) for compact firewall code -- we base on [Monitoring with Collecd recipe](Collectd.md) for understanding the basics about collectd - -### Getting the state and the Ports of your cluster - -Now we need to find out the current configuration of our cluster. For the time being we assume you simply issued - - ./scripts/startLocalCluster.sh - -to get you set up. 
So you know you've got two DB-Servers - one Coordinator, one agent: - - ps -eaf |grep arango - arangod 21406 1 1 16:59 pts/14 00:00:00 bin/etcd-arango --data-dir /var/tmp/tmp-21550-1347489353/shell_server/agentarango4001 --name agentarango4001 --bind-addr 127.0.0.1:4001 --addr 127.0.0.1:4001 --peer-bind-addr 127.0.0.1:7001 --peer-addr 127.0.0.1:7001 --initial-cluster-state new --initial-cluster agentarango4001=http://127.0.0.1:7001 - arangod 21408 1 4 16:56 pts/14 00:00:01 bin/arangod --database.directory cluster/data8629 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8629 --server.endpoint tcp://localhost:8629 --log.file cluster/8629.log - arangod 21410 1 5 16:56 pts/14 00:00:02 bin/arangod --database.directory cluster/data8630 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8630 --server.endpoint tcp://localhost:8630 --log.file cluster/8630.log - arangod 21416 1 5 16:56 pts/14 00:00:02 bin/arangod --database.directory cluster/data8530 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8530 --server.endpoint tcp://localhost:8530 --log.file cluster/8530.log - -We can now check which ports they occupied: - - netstat -aplnt |grep arango - tcp 0 0 127.0.0.1:7001 0.0.0.0:* LISTEN 21406/etcd-arango - tcp 0 0 127.0.0.1:4001 0.0.0.0:* LISTEN 21406/etcd-arango - tcp 0 0 127.0.0.1:8530 0.0.0.0:* LISTEN 21416/arangod - tcp 0 0 127.0.0.1:8629 0.0.0.0:* LISTEN 21408/arangod - tcp 0 0 127.0.0.1:8630 0.0.0.0:* LISTEN 21410/arangod - -- The agent has 7001 and 4001. Since it's running in single server mode its cluster port (7001) should not show any traffic, port 4001 is the interesting one. -- Claus - This is the coordinator. Your Application will talk to it on port 8530 -- Pavel - This is the first DB-Server; Claus will talk to it on port 8629 -- Perry - This is the second DB-Server; Claus will talk to it on port 8630 - -### Configuring IPTables / ferm - -Since the usual solution using shell scripts calling iptables -brings the [DRY principle](http://en.wikipedia.org/wiki/Don%27t_repeat_yourself) to a grinding hold, -we need something better. Here [ferm](http://ferm.foo-projects.org/download/2.2/ferm.html#basic_iptables_match_keywords) comes to the rescue - -It enables you to produce very compact and well readable firewall configurations. 
- -According to the ports we found in the last section, we will configure our firewall in `/etc/ferm/ferm.conf`, and put the identities into the comments so we have a persistent naming scheme: - - # blindly forward these to the accounting chain: - @def $ARANGO_RANGE=4000:9000; - - @def &TCP_ACCOUNTING($PORT, $COMMENT, $SRCCHAIN) = { - @def $FULLCOMMENT=@cat($COMMENT, "_", $SRCCHAIN); - dport $PORT mod comment comment $FULLCOMMENT NOP; - } - - @def &ARANGO_ACCOUNTING($CHAINNAME) = { - # The coordinators: - &TCP_ACCOUNTING(8530, "Claus", $CHAINNAME); - # The db-servers: - &TCP_ACCOUNTING(8629, "Pavel", $CHAINNAME); - &TCP_ACCOUNTING(8630, "Perry", $CHAINNAME); - # The agency: - &TCP_ACCOUNTING(4001, "etcd_client", $CHAINNAME); - # it shouldn't talk to itself if it is only running with a single instance: - &TCP_ACCOUNTING(7007, "etcd_cluster", $CHAINNAME); - } - - table filter { - chain INPUT { - proto tcp dport $ARANGO_RANGE @subchain "Accounting" { - &ARANGO_ACCOUNTING("input"); - } - policy DROP; - - # connection tracking - mod state state INVALID DROP; - mod state state (ESTABLISHED RELATED) ACCEPT; - - # allow local packet - interface lo ACCEPT; - - # respond to ping - proto icmp ACCEPT; - - # allow IPsec - proto udp dport 500 ACCEPT; - proto (esp ah) ACCEPT; - - # allow SSH connections - proto tcp dport ssh ACCEPT; - } - chain OUTPUT { - policy ACCEPT; - - proto tcp dport $ARANGO_RANGE @subchain "Accounting" { - &ARANGO_ACCOUNTING("output"); - } - - # connection tracking - #mod state state INVALID DROP; - mod state state (ESTABLISHED RELATED) ACCEPT; - } - chain FORWARD { - policy DROP; - - # connection tracking - mod state state INVALID DROP; - mod state state (ESTABLISHED RELATED) ACCEPT; - } - } - -**Note**: This is a very basic configuration, mainly with the purpose to demonstrate the accounting feature - so don't run this in production) - -After activating it interactively with - - ferm -i /etc/ferm/ferm.conf - -We now use the iptables command line utility directly to review the status our current setting: - - iptables -L -nvx - Chain INPUT (policy DROP 85 packets, 6046 bytes) - pkts bytes target prot opt in out source destination - 7636 1821798 Accounting tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpts:4000:9000 - 0 0 DROP all -- * * 0.0.0.0/0 0.0.0.0/0 state INVALID - 14700 14857709 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED - 130 7800 ACCEPT all -- lo * 0.0.0.0/0 0.0.0.0/0 - 0 0 ACCEPT icmp -- * * 0.0.0.0/0 0.0.0.0/0 - 0 0 ACCEPT udp -- * * 0.0.0.0/0 0.0.0.0/0 udp dpt:500 - 0 0 ACCEPT esp -- * * 0.0.0.0/0 0.0.0.0/0 - 0 0 ACCEPT ah -- * * 0.0.0.0/0 0.0.0.0/0 - 0 0 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 - - Chain FORWARD (policy DROP 0 packets, 0 bytes) - pkts bytes target prot opt in out source destination - 0 0 DROP all -- * * 0.0.0.0/0 0.0.0.0/0 state INVALID - 0 0 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED - - Chain OUTPUT (policy ACCEPT 296 packets, 19404 bytes) - pkts bytes target prot opt in out source destination - 7720 1882404 Accounting tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpts:4000:9000 - 14575 14884356 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED - - Chain Accounting (2 references) - pkts bytes target prot opt in out source destination - 204 57750 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8530 /* Claus_input */ - 20 17890 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8629 /* Pavel_input */ - 262 97352 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8630 /* Perry_input */ - 2604 336184 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:4001 
/* etcd_client_input */ - 0 0 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:7007 /* etcd_cluster_input */ - 204 57750 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8530 /* Claus_output */ - 20 17890 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8629 /* Pavel_output */ - 262 97352 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8630 /* Perry_output */ - 2604 336184 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:4001 /* etcd_client_output */ - 0 0 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:7007 /* etcd_cluster_output */ - - -You can see nicely the Accounting sub-chain with our comments. These should be pretty straight forward to match. -We also see the **pkts** and **bytes** columns. They contain the current value of these counters of your system. - -Read more about [linux firewalling](http://lartc.org) and -[ferm configuration](http://ferm.foo-projects.org/download/2.2/ferm.html) to be sure you do the right thing. - -### Configuring Collectd to pick up these values - -Since your system now generates these numbers, we want to configure collectd with its [iptables plugin](https://collectd.org/wiki/index.php/Plugin:IPTables) to aggregate them. - -We do so in the `/etc/collectd/collectd.conf.d/iptables.conf`: - - LoadPlugin iptables - - Chain filter "Accounting" "Claus_input" - Chain filter "Accounting" "Pavel_input" - Chain filter "Accounting" "Perry_input" - Chain filter "Accounting" "etcd_client_input" - Chain filter "Accounting" "etcd_cluster_input" - Chain filter "Accounting" "Claus_output" - Chain filter "Accounting" "Pavel_output" - Chain filter "Accounting" "Perry_output" - Chain filter "Accounting" "etcd_client_output" - Chain filter "Accounting" "etcd_cluster_output" - - -Now we restart collectd with `/etc/init.d/collectd restart`, watch the syslog for errors. If everything is OK, our values should show up in: - - /var/lib/collectd/rrd/localhost/iptables-filter-Accounting/ipt_packets-Claus_output.rrd - -We can inspect our values with kcollectd: - -![Kcollectd screenshot](../assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png) - -**Author:** [Wilfried Goesgens](https://github.com/dothebart) - -**Tags:** #monitoring diff --git a/Documentation/Books/Cookbook/README.md b/Documentation/Books/Cookbook/README.md deleted file mode 100644 index a5ec1219c79c..000000000000 --- a/Documentation/Books/Cookbook/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Cookbook - -This cookbook is filled with recipes to help you understand the [multi-model database ArangoDB](https://www.arangodb.com/) better -and to help you with specific problems. - -You can participate and [write your own recipes][2]. -You only need to write a recipe in markdown and make a [pull request to our repository][2]. - -**Recipes** - -There will be some simple recipes to bring you closer to ArangoDB and show you the amount of possibilities -of our Database. -There also will be more complex problems to show you solution to specific problems and the depth of ArangoDB. - -Every recipe is divided into three parts: - -1. **Problem**: A description of the problem -2. **Solution**: A detailed solution of the given problem with code if any is needed -3. **Comment**: Explanation of the solution. 
This part is optional depending on the complexity of the problem - -Every recipe has tags to for a better overview: - -*#api*, *#aql*, *#arangosh*, *#collection*, *#database*, *#debian*, *#docker*, *#document*, *#driver*, *#foxx*, *#giantswarm*, *#graph*, *#howto*, *#java*, *#javascript*, *#join*, *#nodejs*, *#windows* - -[2]: https://github.com/arangodb/arangodb/tree/devel/Documentation/Books/Cookbook diff --git a/Documentation/Books/Cookbook/SUMMARY.md b/Documentation/Books/Cookbook/SUMMARY.md deleted file mode 100644 index 321a3ac0b518..000000000000 --- a/Documentation/Books/Cookbook/SUMMARY.md +++ /dev/null @@ -1,46 +0,0 @@ -# Summary -* [Introduction](README.md) -* [Modelling Document Inheritance](DocumentInheritance.md) -* [Accessing Shapes Data](AccessingShapesData.md) -* [AQL](AQL/README.md) - * [Using Joins in AQL](AQL/Joins.md) - * [Using Dynamic Attribute Names](AQL/DynamicAttributeNames.md) - * [Creating Test-data using AQL](AQL/CreatingTestData.md) - * [Diffing Documents](AQL/DiffingDocuments.md) - * [Avoiding Parameter Injection](AQL/AvoidingInjection.md) - * [Multiline Query Strings](AQL/MultilineQueryStrings.md) - * [Migrating named graph functions to 3.0](AQL/MigratingGraphFunctionsTo3.md) - * [Migrating anonymous graph functions to 3.0](AQL/MigratingEdgeFunctionsTo3.md) - * [Migrating graph measurements to 3.0](AQL/MigratingMeasurementsTo3.md) -* [Graph](Graph/README.md) - * [Fulldepth Graph-Traversal](Graph/FulldepthTraversal.md) - * [Using a custom Visitor](Graph/CustomVisitorFromNodeJs.md) - * [Example AQL Queries for Graphs](Graph/ExampleActorsAndMovies.md) -* [Use Cases / Examples](UseCases/README.md) - * [Monetary data without precision loss](UseCases/MonetaryDataWithoutPrecisionLoss.md) - * [Populating a Textbox](UseCases/PopulatingAnAutocompleteTextbox.md) - * [Exporting Data](UseCases/ExportingData.md) - * [Accessing base documents with Java](UseCases/JavaDriverBaseDocument.md) - * [Add XML data to ArangoDB with Java](UseCases/JavaDriverXmlData.md) -* [Administration](Administration/README.md) - * [Using Authentication](Administration/Authentication.md) - * [Importing Data](Administration/ImportingData.md) - * [Replication](Administration/Replication/README.md) - * [Replicating Data](Administration/ReplicatingData.md) - * [Slave Initialization](Administration/Replication/ReplicationFromBackup.md) - * [Silent installation on Windows](Administration/NSISSilentMode.md) - * [Migrating 2.8 to 3.0](Administration/Migrate2.8to3.0.md) - * [Show grants function](Administration/ShowUsersGrants.md) -* [Compiling / Build](Compiling/README.md) - * [Compile on Debian](Compiling/Debian.md) - * [Compile on Windows](Compiling/Windows.md) - * [Running Custom Build](Compiling/RunningCustomBuild.md) - * [Recompiling jemalloc](Compiling/jemalloc.md) -* [Docker](Cloud/README.md) - * [Docker ArangoDB](Cloud/DockerContainer.md) - * [Docker with NodeJS App](Cloud/NodeJsDocker.md) -* [Monitoring](Monitoring/Collectd.md) - * [Collectd - Replication Slaves](Monitoring/SlaveStatus.md) - * [Collectd - Network usage](Monitoring/TrafficWithIPAccounting.md) - * [Collectd - more Metrics](Monitoring/OtherRelevantMetrics.md) - * [Collectd - Monitoring Foxx](Monitoring/FoxxApps.md) diff --git a/Documentation/Books/Cookbook/UseCases/ExportingData.md b/Documentation/Books/Cookbook/UseCases/ExportingData.md deleted file mode 100644 index 03d8ac8bcf86..000000000000 --- a/Documentation/Books/Cookbook/UseCases/ExportingData.md +++ /dev/null @@ -1,310 +0,0 @@ -Exporting Data for Offline Processing 
-===================================== - -In this recipe we will learn how to use the [export API][1] to extract data and process it with PHP. At the end of the recipe you can download the complete PHP script. - -**Note**: The following recipe is written using an ArangoDB server with version 2.6 or higher. You can also use the `devel` branch, since version 2.6 hasn't been an official release yet. - -Howto ------ - -### Importing example data - -First of all we need some data in an ArangoDB collection. For this example we will use a collection named `users` which we will populate with 100.000 [example documents][2]. This way you can get the data into ArangoDB: - -```bash -# download data file -wget https://jsteemann.github.io/downloads/code/users-100000.json.tar.gz -# uncompress it -tar xvfz users-100000.json.tar.gz -# import into ArangoDB -arangoimport --file users-100000.json --collection users --create-collection true -``` - -### Setting up ArangoDB-PHP - -For this recipe we will use the [ArangoDB PHP driver][3]: - -```bash -git clone -b devel "https://github.com/arangodb/arangodb-php.git" -``` - -We will now write a simple PHP script that establishes a connection to ArangoDB on localhost: - -```php - 'tcp://localhost:8529', - // can use Keep-Alive connection - ConnectionOptions::OPTION_CONNECTION => 'Keep-Alive', - // use basic authorization - ConnectionOptions::OPTION_AUTH_TYPE => 'Basic', - // user for basic authorization - ConnectionOptions::OPTION_AUTH_USER => 'root', - // password for basic authorization - ConnectionOptions::OPTION_AUTH_PASSWD => '', - // timeout in seconds - ConnectionOptions::OPTION_TIMEOUT => 30, - // database name - ConnectionOptions::OPTION_DATABASE => '_system' - ); - -try { - // establish connection - $connection = new Connection($connectionOptions); - - echo 'Connected!' . PHP_EOL; - - // TODO: now do something useful with the connection! - -} catch (ConnectException $e) { - print $e . PHP_EOL; -} catch (ServerException $e) { - print $e . PHP_EOL; -} catch (ClientException $e) { - print $e . PHP_EOL; -} -``` - -After running the script you should see `Connected!` in the bash if successful. - -### Extracting the data - -Now we can run an export of the data in the collection `users`. Place the following code into the `TODO` part of the first code: - -```php -function export($collection, Connection $connection) { - $fp = fopen('output.json', 'w'); - - if (! $fp) { - throw new Exception('could not open output file!'); - } - - // settings to use for the export - $settings = array( - 'batchSize' => 5000, // export in chunks of 5K documents - '_flat' => true // use simple PHP arrays - ); - - $export = new Export($connection, $collection, $settings); - - // execute the export. this will return an export cursor - $cursor = $export->execute(); - - // statistics - $count = 0; - $batches = 0; - $bytes = 0; - - // now we can fetch the documents from the collection in batches - while ($docs = $cursor->getNextBatch()) { - $output = ''; - foreach ($docs as $doc) { - $output .= json_encode($doc) . PHP_EOL; - } - - // write out chunk - fwrite($fp, $output); - - // update statistics - $count += count($docs); - $bytes += strlen($output); - ++$batches; - } - - fclose($fp); - - echo sprintf('written %d documents in %d batches with %d total bytes', - $count, - $batches, - $bytes) . PHP_EOL; -} - -// run the export -export('users', $connection); -``` - -The function extracts all documents from the collection and writes them into an output file `output.json`. 
In addition it will print some statistics about the number of documents and the total data size: - -```json -written 100000 documents in 20 batches with 40890013 total bytes -``` - -### Applying some transformations - -We now will use PHP to transform data as we extract it: - -```php -function transformDate($value) { - return preg_replace('/^(\\d+)-(\\d+)-(\\d+)$/', '\\2/\\3/\\1', $value); -} - -function transform(array $document) { - static $genders = array('male' => 'm', 'female' => 'f'); - - $transformed = array( - 'gender' => $genders[$document['gender']], - 'dob' => transformDate($document['birthday']), - 'memberSince' => transformDate($document['memberSince']), - 'fullName' => $document['name']['first'] . ' ' . $document['name']['last'], - 'email' => $document['contact']['email'][0] - ); - - return $transformed; -} - -function export($collection, Connection $connection) { - $fp = fopen('output-transformed.json', 'w'); - - if (! $fp) { - throw new Exception('could not open output file!'); - } - - // settings to use for the export - $settings = array( - 'batchSize' => 5000, // export in chunks of 5K documents - '_flat' => true // use simple PHP arrays - ); - - $export = new Export($connection, $collection, $settings); - - // execute the export. this will return an export cursor - $cursor = $export->execute(); - - // now we can fetch the documents from the collection in batches - while ($docs = $cursor->getNextBatch()) { - $output = ''; - foreach ($docs as $doc) { - $output .= json_encode(transform($doc)) . PHP_EOL; - } - - // write out chunk - fwrite($fp, $output); - } - - fclose($fp); -} - -// run the export -export('users', $connection); -``` - -With this script the following changes will be made on the data: -- rewrite the contents of the `gender`attribute. `female` becomes `f` and `male` becomes `m` -- `birthday` now becomes `dob` -- the date formations will be changed from YYYY-MM-DD to MM/DD/YYYY -- concatenate the contents of `name.first` and `name.last` -- `contact.email` will be transformed from an array to a flat string -- every other attribute will be removed - -**Note**: The output will be in a file named `output-transformed.json`. - -### Filtering attributes - -#### Exclude certain attributes - -Instead of filtering out as done in the previous example we can easily configure the export to exclude these attributes server-side: - -```php -// settings to use for the export -$settings = array( - 'batchSize' => 5000, // export in chunks of 5K documents - '_flat' => true, // use simple PHP arrays - 'restrict' => array( - 'type' => 'exclude', - 'fields' => array('_id', '_rev', '_key', 'likes') - ) -); -``` - -This script will exclude the attributes `_id`, `_rev`. `_key` and `likes`. - -#### Include certain attributes - -We can also include attributes with the following script: - -```php -function export($collection, Connection $connection) { - // settings to use for the export - $settings = array( - 'batchSize' => 5000, // export in chunks of 5K documents - '_flat' => true, // use simple PHP arrays - 'restrict' => array( - 'type' => 'include', - 'fields' => array('_key', 'name') - ) - ); - - $export = new Export($connection, $collection, $settings); - - // execute the export. this will return an export cursor - $cursor = $export->execute(); - - // now we can fetch the documents from the collection in batches - while ($docs = $cursor->getNextBatch()) { - $output = ''; - - foreach ($docs as $doc) { - $values = array( - $doc['_key'], - $doc['name']['first'] . ' ' . 
$doc['name']['last'] - ); - - $output .= '"' . implode('","', $values) . '"' . PHP_EOL; - } - - // print out the data directly - print $output; - } -} - -// run the export -export('users', $connection); -``` - -In this script only the `_key` and `name` attributes are extracted. In the prints the `_key`/`name` pairs are in CSV format. - -**Note**: The whole script [can be downloaded][4]. - -### Using the API without PHP - -The export API REST interface can be used with any client that can speak HTTP like curl. With the following command you can fetch the documents from the `users` collection: - -```bash -curl - -X POST - http://localhost:8529/_api/export?collection=users ---data '{"batchSize":5000}' -``` - -The HTTP response will contatin a `result` attribute that contains the actual documents. The attribute `hasMore` will indicate if there are more documents for the client to fetch. -The HTTP will also contain an attribute `id` if set to _true_. - -With the `id` you can send follow-up requests like this: - -```bash -curl - -X PUT - http://localhost:8529/_api/export/13979338067709 -``` - - -**Authors:** [Thomas Schmidts](https://github.com/13abylon) - and [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #howto #php - - -[1]: https://jsteemann.github.io/blog/2015/04/04/more-efficient-data-exports/ -[2]: https://jsteemann.github.io/downloads/code/users-100000.json.tar.gz -[3]: https://github.com/arangodb/arangodb-php -[4]: https://jsteemann.github.io/downloads/code/export-csv.php diff --git a/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md b/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md deleted file mode 100644 index 9a0ac66ad1dc..000000000000 --- a/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md +++ /dev/null @@ -1,66 +0,0 @@ -How to retrieve documents from ArangoDB without knowing the structure? -====================================================================== - -Problem -------- - -If you use a NoSQL database it's common to retrieve documents with an unknown attribute structure. Furthermore, the amount and types of attributes may differ in documents resulting from a single query. Another problem is that you want to add one ore more attributes to a document. - -In Java you are used to work with objects. Regarding the upper requirements it is possible to directly retrieve objects with the same attribute structure as the document out of the database. Adding attributes to an object at runtime could be very messy. - -**Note**: ArangoDB 3.1 and the corresponding [Java driver](https://github.com/arangodb/arangodb-java-driver#supported-versions) is needed. - - -Solution --------- - -With the latest version of the Java driver of ArangoDB an object called `BaseDocument` is provided. - -The structure is very simple: It only has four attributes: - -```java -public class BaseDocument { - - String id; - String key; - String revision; - Map properties; - -} -``` - -The first three attributes are the system attributes `_id`, `_key` and `_rev`. The fourth attribute is a `HashMap`. The key always is a String, the value an object. These properties contain all non system attributes of the document. - -The map can contain values of the following types: - -* Map -* List -* Boolean -* Number -* String -* null - -**Note**: `Map` and `List` contain objects, which are of the same types as listed above. - -To retrieve a document is similar to the known procedure, except that you use `BaseDocument` as type. 
- -```java -ArangoDB.Builder arango = new ArangoDB.Builder().builder(); -DocumentEntity myObject = arango.db().collection("myCollection").getDocument("myDocumentKey", BaseDocument.class); -``` - - -Other resources ---------------- - -More documentation about the ArangoDB Java driver is available: - -- [Tutorial: Java in ten minutes](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/) -- [Java driver at Github](https://github.com/arangodb/arangodb-java-driver) -- [Example source code](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example) -- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_1/index.html) - -**Author**: [gschwab](https://github.com/gschwab), - [Mark Vollmary](https://github.com/mpv1989) - -**Tags**: #java #driver diff --git a/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md b/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md deleted file mode 100644 index 356169ad8cb4..000000000000 --- a/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md +++ /dev/null @@ -1,208 +0,0 @@ -How to add XML data to ArangoDB? -================================ - -Problem -------- - -You want to store XML data files into a database to have the ability to make queries onto them. - -**Note**: ArangoDB 3.1 and the corresponding Java driver is needed. - -Solution --------- - -Since version 3.1.0 the arangodb-java-driver supports writing, reading and querying of raw strings containing the JSON documents. - -With [JsonML](http://www.jsonml.org/) you can convert a XML string into a JSON string and back to XML again. - -Converting XML into JSON with JsonML example: - -```java -String string = " " - + "Basic bread " - + "Flour " - + "Yeast " - + "Water " - + "Salt " - + " " - + "Mix all ingredients together. " - + "Knead thoroughly. " - + "Cover with a cloth, and leave for one hour in warm room. " - + "Knead again. " - + "Place in a bread baking tin. " - + "Cover with a cloth, and leave for one hour in warm room. " - + "Bake in the oven at 180(degrees)C for 30 minutes. " - + " " - + " "; - -JSONObject jsonObject = JSONML.toJSONObject(string); -System.out.println(jsonObject.toString()); -``` - -The converted JSON string: - -```json -{ - "prep_time" : "5 mins", - "name" : "bread", - "cook_time" : "3 hours", - "tagName" : "recipe", - "childNodes" : [ - { - "childNodes" : [ - "Basic bread" - ], - "tagName" : "title" - }, - { - "childNodes" : [ - "Flour" - ], - "tagName" : "ingredient", - "amount" : 8, - "unit" : "dL" - }, - { - "unit" : "grams", - "amount" : 10, - "tagName" : "ingredient", - "childNodes" : [ - "Yeast" - ] - }, - { - "childNodes" : [ - "Water" - ], - "tagName" : "ingredient", - "amount" : 4, - "unit" : "dL", - "state" : "warm" - }, - { - "childNodes" : [ - "Salt" - ], - "tagName" : "ingredient", - "unit" : "teaspoon", - "amount" : 1 - }, - { - "childNodes" : [ - { - "tagName" : "step", - "childNodes" : [ - "Mix all ingredients together." - ] - }, - { - "tagName" : "step", - "childNodes" : [ - "Knead thoroughly." - ] - }, - { - "childNodes" : [ - "Cover with a cloth, and leave for one hour in warm room." - ], - "tagName" : "step" - }, - { - "tagName" : "step", - "childNodes" : [ - "Knead again." - ] - }, - { - "childNodes" : [ - "Place in a bread baking tin." - ], - "tagName" : "step" - }, - { - "tagName" : "step", - "childNodes" : [ - "Cover with a cloth, and leave for one hour in warm room." - ] - }, - { - "tagName" : "step", - "childNodes" : [ - "Bake in the oven at 180(degrees)C for 30 minutes." 
- ] - } - ], - "tagName" : "instructions" - } - ] -} -``` - -Saving the converted JSON to ArangoDB example: - -```java -ArangoDB.Builder arango = new ArangoDB.Builder().build(); -ArangoCollection collection = arango.db().collection("testCollection") -DocumentCreateEntity entity = collection.insertDocument( - jsonObject.toString()); -String key = entity.getKey(); -``` - -Reading the stored JSON as a string and convert it back to XML example: - -```java -String rawJsonString = collection.getDocument(key, String.class); -String xml = JSONML.toString(rawJsonString); -System.out.println(xml); -``` - -Example output: - -```xml - - Basic bread - Flour - Yeast - Water - Salt - - Mix all ingredients together. - Knead thoroughly. - Cover with a cloth, and leave for one hour in warm room. - Knead again. - Place in a bread baking tin. - Cover with a cloth, and leave for one hour in warm room. - Bake in the oven at 180(degrees)C for 30 minutes. - - -``` - -**Note:** The [fields mandatory to ArangoDB documents](../../Manual/DataModeling/Documents/DocumentAddress.html) are added; If they break your XML schema you have to remove them. - -Query raw data example: - -```java -String queryString = "FOR t IN testCollection FILTER t.cook_time == '3 hours' RETURN t"; -ArangoCursor cursor = arango.db().query(queryString, null, null, String.class); -while (cursor.hasNext()) { - JSONObject jsonObject = new JSONObject(cursor.next()); - String xml = JSONML.toString(jsonObject); - System.out.println("XML value: " + xml); -} -``` - -Other resources ---------------- - -More documentation about the ArangoDB Java driver is available: - -- [Tutorial: Java in ten minutes](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/) -- [Java driver at Github](https://github.com/arangodb/arangodb-java-driver) -- [Example source code](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example) -- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_1/index.html) - -**Author**: [Achim Brandt](https://github.com/a-brandt), - [Mark Vollmary](https://github.com/mpv1989) - -**Tags**: #java #driver diff --git a/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md b/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md deleted file mode 100644 index 41abd402d5bf..000000000000 --- a/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md +++ /dev/null @@ -1,37 +0,0 @@ -Working with monetary data without precision loss in ArangoDB -============================================================= - -Problem -------- - -Applications that handle monetary data often require to capture fractional units -of currency and need to emulate decimal rounding without precision loss. -Compared to relational databases, JSON does not support arbitrary precision -out-of-the-box but there are suitable workarounds. - -Solution --------- - -In ArangoDB there are two ways to handle monetary data: - -1. Monetary data **as integer**: -
-   If you store data as an integer, decimals can be avoided by using a general
-   scale factor, e.g. `100`, turning `19.99` into `1999`. This approach works
-   for integer values of up to (but excluding) 2^53 without precision loss.
-   Calculations can then be done on the server side.
-
-2. Monetary data **as string**:
-
- If you only want to store and retrieve monetary data you can do so without - any precision loss by storing this data as string. However, when using - strings for monetary data values it will not be possible to do calculations - on them on the server. Calculations have to happen in application logic - that is capable of doing arithmetic on string-encoded integers. - - **Authors:** - [Jan Stücke](https://github.com/MrPieces), - [Jan Steemann](https://github.com/jsteemann) - - **Tags**: #howto #datamodel #numbers - diff --git a/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md b/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md deleted file mode 100644 index 618e3ddb4b79..000000000000 --- a/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md +++ /dev/null @@ -1,123 +0,0 @@ -Populating an autocomplete textbox -================================== - -Problem -------- - -I want to populate an autocomplete textbox with values from a collection. The completions -should adjust dynamically based on user input. - -Solution --------- - -Use a web framework for the client-side autocomplete rendering and event processing. Use -a collection with a (sorted) skiplist index and a range query on it to efficiently fetch -the completion values dynamically. Connect the two using a simple Foxx route. - -### Install an example app - -[This app](https://github.com/jsteemann/autocomplete) contains a jquery-powered web page -with an autocomplete textbox. It uses [jquery autocomplete](http://jqueryui.com/autocomplete/), -but every other web framework will also do. - -The app can be installed as follows: - -* in the ArangoDB web interface, switch into the **Applications** tab -* there, click *Add Application* -* switch on the *Github* tab -* for *Repository*, enter `jsteemann/autocomplete` -* for *Version*, enter `master` -* click *Install* - -Now enter a mountpoint for the application. This is the URL path under which the -application will become available. For the example app, the mount point does not matter. -The web page in the example app assumes it is served by ArangoDB, too. So it uses a -relative URL `autocomplete`. This is easiest to set up, but in reality you might want -to have your web page served by a different server. In this case, your web page will -have to call the app mount point you just entered. - -To see the example app in action, click on **Open**. The autocomplete textbox should be -populated with server data when at least two letters are entered. - -### Backend code, setup script - -The app also contains a backend route `/autocomplete` which is called by the web page to -fetch completions based on user input. The HTML code for the web page is -[here](https://github.com/jsteemann/autocomplete/blob/master/assets/index.html). - -Contained in the app is a [setup script](https://github.com/jsteemann/autocomplete/blob/master/scripts/setup.js) -that will create a collection named `completions` and load some initial data into it. The -example app provides autocompletion for US city names, and the setup script populates the -collection with about 10K city names. - -The setup script also [creates a skiplist index on the lookup attribute](https://github.com/jsteemann/autocomplete/blob/master/scripts/setup.js#L10561), -so this attribute can be used for efficient filtering and sorting later. -The `lookup` attribute contains the city names already lower-cased, and the original -(*pretty*) names are stored in attribute `pretty`. 
This attribute will be returned to -users. - -### Backend code, Foxx route controller - -The app contains a [controller](https://github.com/jsteemann/autocomplete/blob/master/demo.js). -The backend action `/autocomplete` that is called by the web page is also contained herein: - -```js -controller.get("/autocomplete", function (req, res) { - // search phrase entered by user - var searchString = req.params("q").trim() || ""; - // lower bound for search range - var begin = searchString.replace(/[^a-zA-Z]/g, " ").toLowerCase(); - if (begin.length === 0) { - // search phrase is empty - no need to perfom a search at all - res.json([]); - return; - } - // upper bound for search range - var end = begin.substr(0, begin.length - 1) + String.fromCharCode(begin.charCodeAt(begin.length - 1) + 1); - // bind parameters for query - var queryParams = { - "@collection" : "completions", - "begin" : begin, - "end" : end - }; - // the search query - var query = "FOR doc IN @@collection FILTER doc.lookup >= @begin && doc.lookup < @end SORT doc.lookup RETURN { label: doc.pretty, value: doc.pretty, id: doc._key }"; - res.json(db._query(query, queryParams).toArray()); -} -``` - -The backend code first fetches the search string from the URL parameter `q`. This is what the -web page will send us. - -Based on the search string, a lookup range is calculated. First of all, the search string is -lower-cased and all non-letter characters are removed from it. The resulting string is the -lower bound for the lookup. For the upper bound, we can use the lower bound with its last -letter character code increased by one. - -For example, if the user entered `Los A` into the textbox, the web page will send us the string -`Los A` in URL parameter `q`. Lower-casing and removing non-letter characters from the string, -we'll get `losa`. This is the lower bound. The upper bound is `losa`, with its last letter adjusted -to `b` (i.e. `losb`). - -Finally, the lower and upper bounds are inserted into the following query using bind parameters -`@begin` and `@end`: - -``` -FOR doc IN @@collection - FILTER doc.lookup >= @begin && doc.lookup < @end - SORT doc.lookup - RETURN { - label: doc.pretty, - value: doc.pretty, - id: doc._key - } -``` - -The city names in the lookup range will be returned sorted. For each city, three values are -returned (the `id` contains the document key, the other two values are for display purposes). -Other frameworks may require a different return format, but that can easily be done by -adjusting the AQL query. 
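For example, if a frontend expected each completion entry to expose `text` and `key` attributes instead (hypothetical names, purely for illustration), only the `RETURN` clause of the query above would need to change:

```
FOR doc IN @@collection
  FILTER doc.lookup >= @begin && doc.lookup < @end
  SORT doc.lookup
  RETURN {
    text: doc.pretty,
    key: doc._key
  }
```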
- -**Author:** [Jan Steemann](https://github.com/jsteemann) - -**Tags**: #aql #autocomplete #jquery diff --git a/Documentation/Books/Cookbook/UseCases/README.md b/Documentation/Books/Cookbook/UseCases/README.md deleted file mode 100644 index 2acffb3d9fc1..000000000000 --- a/Documentation/Books/Cookbook/UseCases/README.md +++ /dev/null @@ -1,12 +0,0 @@ -Use Cases / Examples -==================== - -- [Working with monetary data without precision loss](MonetaryDataWithoutPrecisionLoss.md) - -- [Populating a Textbox](PopulatingAnAutocompleteTextbox.md) - -- [Exporting Data](ExportingData.md) - -- [Accessing base documents with Java](JavaDriverBaseDocument.md) - -- [Add XML data to ArangoDB with Java](JavaDriverXmlData.md) diff --git a/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png b/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png deleted file mode 100644 index 603f0b885237..000000000000 Binary files a/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png and /dev/null differ diff --git a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png b/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png deleted file mode 100644 index d08add50def4..000000000000 Binary files a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png and /dev/null differ diff --git a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png b/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png deleted file mode 100644 index b65604a5f2bc..000000000000 Binary files a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png and /dev/null differ diff --git a/Documentation/Books/Cookbook/book.json b/Documentation/Books/Cookbook/book.json deleted file mode 100644 index b58ba398dea8..000000000000 --- a/Documentation/Books/Cookbook/book.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "gitbook": "^3.2.2", - "title": "ArangoDB VERSION_NUMBER Cookbook", - "version": "VERSION_NUMBER", - "author": "ArangoDB GmbH", - "description": "Recipes for ArangoDB - the native multi-model NoSQL database", - "language": "en", - "plugins": [ - "-search", - "-lunr", - "-sharing", - "toggle-chapters", - "addcssjs", - "anchorjs", - "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git", - "ga", - "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git", - "edit-link", - "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git", - "localized-footer" - ], - "pdf": { - "fontSize": 12, - "toc": true, - "margin": { - "right": 60, - "left": 60, - "top": 35, - "bottom": 35 - } - }, - "styles": { - "website": "styles/website.css" - }, - "pluginsConfig": { - "addcssjs": { - "js": ["styles/header.js", "styles/hs.js"], - "css": ["styles/header.css"] - }, - "sitemap-general": { - "prefix": "https://docs.arangodb.com/devel/Cookbook/", - "changefreq": "@GCHANGE_FREQ@", - "priority": @GPRIORITY@ - }, - "ga": { - "token": "UA-81053435-2" - }, - "edit-link": { - "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/Cookbook", - "label": "Edit Page" - }, - "localized-footer": { - "filename": "FOOTER.html" - } - } -} diff --git a/Documentation/Books/Cookbook/styles/header.css b/Documentation/Books/Cookbook/styles/header.css deleted file mode 100644 index 4ec87c77b0e5..000000000000 --- a/Documentation/Books/Cookbook/styles/header.css +++ 
/dev/null @@ -1,305 +0,0 @@ -/* Design fix because of the header */ -@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700); - -body { - overflow: hidden; - font-family: Roboto, Helvetica, sans-serif; - background: #444444; -} - -.book .book-header h1 a, .book .book-header h1 a:hover { - display: none; -} - -/* GOOGLE START */ - -.google-search #gsc-iw-id1{ - border: none !important; -} - -.google-search .gsst_b { - position: relative; - top: 10px; - left: -25px; - width: 1px; -} - -.gsst_a .gscb_a { - color: #c01a07 !important; -} - -.google-search input { - background-color: #fff !important; - font-family: Roboto, Helvetica, sans-serif; - font-size: 10pt !important; - padding-left: 5px !important; - float: right; - position: relative; - top: 8px; - width: 100% !important; - height: 30px !important; -} - -.google-search input:active { -} - -.google-search { - margin-right: 10px; - margin-left: 10px !important; - float: right !important; -} - -.google-search td, -.google-search table, -.google-search tr, -.google-search th { - background-color: #444444 !important; -} - -.google-search .gsc-input-box, -.google-search .gsc-input-box input { - border-radius: 3px !important; - width: 200px; -} - -.gsc-branding-text, -.gsc-branding-img, -.gsc-user-defined-text { - display: none !important; -} - -.google-search .gsc-input-box input { - font-size: 16px !important; -} - -.google-search .gsc-search-button { - display: none !important; -} - -.google-search .gsc-control-cse { - padding: 10px !important; -} - -.google-search > div { - float: left !important; - width: 200px !important; -} - -/* GOOGLE END */ - -.book-summary, -.book-body { - margin-top: 48px; -} - -.arangodb-logo, .arangodb-logo-small { - display: inline; - float: left; - padding-top: 12px; - margin-left: 10px; -} - -.arangodb-logo img { - height: 23px; -} - -.arangodb-logo-small { - display: none; -} - -.arangodb-version-switcher { - width: 65px; - height: 44px; - margin-left: 16px; - float: left; - display: inline; - font-weight: bold; - color: #fff; - background-color: inherit; - border: 0; -} - -.arangodb-version-switcher option { - background-color: white; - color: black; -} - - -.arangodb-header { - position: fixed; - width: 100%; - height: 48px; - z-index: 1; -} - -.arangodb-header .socialIcons-googlegroups a img { - position: relative; - height: 14px; - top: 3px; -} - -.arangodb-navmenu { - display: block; - float: right; - margin: 0; - padding: 0; -} - -.arangodb-navmenu li { - display: block; - float: left; -} - -.arangodb-navmenu li a { - display: block; - float: left; - padding: 0 10px; - line-height: 48px; - font-size: 16px; - font-weight: 400; - color: #fff; - text-decoration: none; - font-family: Roboto, Helvetica, sans-serif; -} - -.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover { - background-color: #88A049 !important; -} - -.downloadIcon { - margin-right: 10px; -} - -/** simple responsive updates **/ - -@media screen and (max-width: 1000px) { - .arangodb-navmenu li a { - padding: 0 6px; - } - - .arangodb-logo { - margin-left: 10px; - } - - .google-search { - margin-right: 5px !important; - } - - .downloadIcon { - margin-right: 0; - } - - .socialIcons { - display: none !important; - } -} - - -@media screen and (max-width: 800px) { - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 130px !important; - } - - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-logo { - display: none; - } - - 
.arangodb-logo-small { - display: inline; - margin-left: 10px; - } - - .arangodb-logo-small img { - height: 20px; - } - - .arangodb-version-switcher { - margin: 0; - } - -} - -@media screen and (max-width: 600px) { - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-version-switcher, - .downloadIcon { - display: none !important; - } - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 24px !important; - } - - .google-search .gsc-input-box input[style] { - background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important; - } - - .google-search .gsc-input-box input:focus { - width: 200px !important; - position: relative; - left: -176px; - background-position: -9999px -9999px !important; - } - -} - -@media screen and (max-width: 400px) { - .arangodb-navmenu li a { - font-size: 13px; - padding: 0 5px; - } - .google-search { - display: none; - } -} - -/*Hubspot Cookie notice */ - -body div#hs-eu-cookie-confirmation { - bottom: 0; - top: auto; - position: fixed; - text-align: center !important; -} - -body div#hs-eu-cookie-confirmation.can-use-gradients { - background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75)); -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner { - display: inline-block; - padding: 15px 18px 0; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area { - float: left; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button { - background-color: #577138 !important; - border: none !important; - text-shadow: none !important; - box-shadow: none; - padding: 5px 15px !important; - margin-left: 10px; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p { - float: left; - color: #000 !important; - text-shadow: none; -} diff --git a/Documentation/Books/Cookbook/styles/header.js b/Documentation/Books/Cookbook/styles/header.js deleted file mode 100644 index 8c34d6bdb6ae..000000000000 --- a/Documentation/Books/Cookbook/styles/header.js +++ /dev/null @@ -1,161 +0,0 @@ -// Try to set the version number early, jQuery not available yet -var searcheable_versions = [@BROWSEABLE_VERSIONS@]; -var cx = '@GSEARCH_ID@'; - -document.addEventListener("DOMContentLoaded", function(event) { - if (!gitbook.state.root) return; - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = document.getElementsByClassName("arangodb-version-switcher")[0]; - if (bookVersion) { - switcher.value = bookVersion[1]; - } else { - switcher.style.display = "none"; - } -}); - -window.onload = function(){ -window.localStorage.removeItem(":keyword"); - -$(document).ready(function() { - -function appendHeader() { - var VERSION_SELECTOR = "" - var i = 0; - var prefix; - for (i = 0; i < searcheable_versions.length; i++ ) { - if (searcheable_versions[i] === 'devel') { - prefix = ''; - } else { - prefix = 'v'; - } - VERSION_SELECTOR += '\n'; - } - - var div = document.createElement('div'); - div.innerHTML = '
\n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n'; - - $('.book').before(div.innerHTML); - - }; - - - function rerenderNavbar() { - $('.arangodb-header').remove(); - appendHeader(); - }; - - //render header - rerenderNavbar(); - function addGoogleSrc() { - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//cse.google.com/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - }; - addGoogleSrc(); - - $(".arangodb-navmenu a[data-book]").on("click", function(e) { - e.preventDefault(); - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - urlSplit.pop(); // e.g. "Manual" - window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html"; - }); - - // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*) - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = $(".arangodb-version-switcher"); - if (bookVersion) { - switcher.val(bookVersion[1]); - } else { - switcher.hide(); - } - - $(".arangodb-version-switcher").on("change", function(e) { - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - var currentBook = urlSplit.pop(); // e.g. "Manual" - urlSplit.pop() // e.g. "3.0" - if (e.target.value == "2.8") { - var legacyMap = { - "Manual": "", - "AQL": "/Aql", - "HTTP": "/HttpApi", - "Cookbook": "/Cookbook" - }; - currentBook = legacyMap[currentBook]; - } else { - currentBook = "/" + currentBook; - } - window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html"; - }); - -}); - -}; diff --git a/Documentation/Books/Cookbook/styles/hs.js b/Documentation/Books/Cookbook/styles/hs.js deleted file mode 100644 index 9a8ae18a61d2..000000000000 --- a/Documentation/Books/Cookbook/styles/hs.js +++ /dev/null @@ -1,33 +0,0 @@ -// HubSpot Script Loader. Please do not block this resource. 
See more: http://hubs.ly/H0702_H0 - -(function (id, src, attrs) { - if (document.getElementById(id)) { - try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); } - finally { return; } - } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } } - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"}); - -(function (id, src) { - if (document.getElementById(id)) { return; } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js'); - -window.setTimeout(function () { - $('body').on('click', 'a', function () { - var _hsq = window._hsq = window._hsq || []; - _hsq.push(['setPath', window.location.pathname]); - _hsq.push(['trackPageView']); - }); -}, 1000); diff --git a/Documentation/Books/Cookbook/styles/website.css b/Documentation/Books/Cookbook/styles/website.css deleted file mode 100755 index 0bbc2f1eff37..000000000000 --- a/Documentation/Books/Cookbook/styles/website.css +++ /dev/null @@ -1,84 +0,0 @@ -.markdown-section small { - font-size: 80%; -} -.markdown-section sub, .markdown-section sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -.markdown-section sup { - top: -.5em; -} -.markdown-section sub { - bottom: -.25em; -} - -div.example_show_button { - border: medium solid lightgray; - text-align: center; - position: relative; - top: -10px; - display: flex; - justify-content: center; -} - -.book .book-body .navigation.navigation-next { - right: 10px !important; -} - -.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover { - color: #fff !important; - background: #80A54D !important; - text-decoration: none; -} - -.book .book-body .page-wrapper .page-inner section.normal .deprecated{ - background-color: rgba(240,240,0,0.4); -} - -.book .book-body section > ul li:last-child { - margin-bottom: 0.85em; -} - -.book .book-body .alert p:last-child { - margin-bottom: 0; -} - -.columns-3 { - -webkit-column-count: 3; - -moz-column-count: 3; - -ms-column-count: 3; - -o-column-count: 3; - column-count: 3; - columns: 3; -} - -.localized-footer { - opacity: 0.5; -} - -.example-container { - position: relative; -} - -.example-container a.anchorjs-link { - position: absolute; - top: 10px; - right: 10px; - font: 1em/1 anchorjs-icons; -} - -.gsib_a { -padding: 0px !important; -} - -.gsc-control-cse { -border: 0px !important; -background-color: transparent !important; -} - - -.gsc-input { -margin: 0px !important; -} diff --git a/Documentation/Books/Drivers/.gitkeep b/Documentation/Books/Drivers/.gitkeep new file mode 100644 index 000000000000..936ca3adc4e3 --- /dev/null +++ b/Documentation/Books/Drivers/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the directory is kept. + +Some of the old documentation building scripts are still +used by the new system which copy files into this folder. 
\ No newline at end of file diff --git a/Documentation/Books/Drivers/FOOTER.html b/Documentation/Books/Drivers/FOOTER.html deleted file mode 100644 index 239869bfaf6a..000000000000 --- a/Documentation/Books/Drivers/FOOTER.html +++ /dev/null @@ -1 +0,0 @@ -© ArangoDB - the native multi-model NoSQL database \ No newline at end of file diff --git a/Documentation/Books/Drivers/GO/ConnectionManagement/README.md b/Documentation/Books/Drivers/GO/ConnectionManagement/README.md deleted file mode 100644 index 67de1a7218c7..000000000000 --- a/Documentation/Books/Drivers/GO/ConnectionManagement/README.md +++ /dev/null @@ -1,85 +0,0 @@ - -# ArangoDB GO Driver - Connection Management -## Failover - -The driver supports multiple endpoints to connect to. All request are in principle -send to the same endpoint until that endpoint fails to respond. -In that case a new endpoint is chosen and the operation is retried. - -The following example shows how to connect to a cluster of 3 servers. - -```go -conn, err := http.NewConnection(http.ConnectionConfig{ - Endpoints: []string{"http://server1:8529", "http://server2:8529", "http://server3:8529"}, -}) -if err != nil { - // Handle error -} -c, err := driver.NewClient(driver.ClientConfig{ - Connection: conn, -}) -if err != nil { - // Handle error -} -``` - -Note that a valid endpoint is an URL to either a standalone server, or a URL to a coordinator -in a cluster. - -## Failover: Exact behavior - -The driver monitors the request being send to a specific server (endpoint). -As soon as the request has been completely written, failover will no longer happen. -The reason for that is that several operations cannot be (safely) retried. -E.g. when a request to create a document has been send to a server and a timeout -occurs, the driver has no way of knowing if the server did or did not create -the document in the database. - -If the driver detects that a request has been completely written, but still gets -an error (other than an error response from Arango itself), it will wrap the -error in a `ResponseError`. The client can test for such an error using `IsResponseError`. - -If a client received a `ResponseError`, it can do one of the following: -- Retry the operation and be prepared for some kind of duplicate record / unique constraint violation. -- Perform a test operation to see if the "failed" operation did succeed after all. -- Simply consider the operation failed. This is risky, since it can still be the case that the operation did succeed. - -## Failover: Timeouts - -To control the timeout of any function in the driver, you must pass it a context -configured with `context.WithTimeout` (or `context.WithDeadline`). - -In the case of multiple endpoints, the actual timeout used for requests will be shorter than -the timeout given in the context. -The driver will divide the timeout by the number of endpoints with a maximum of 3. -This ensures that the driver can try up to 3 different endpoints (in case of failover) without -being canceled due to the timeout given by the client. -E.g. -- With 1 endpoint and a given timeout of 1 minute, the actual request timeout will be 1 minute. -- With 3 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds. -- With 8 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds. - -For most requests you want a actual request timeout of at least 30 seconds. - -## Secure connections (SSL) - -The driver supports endpoints that use SSL using the `https` URL scheme. 
- -The following example shows how to connect to a server that has a secure endpoint using -a self-signed certificate. - -```go -conn, err := http.NewConnection(http.ConnectionConfig{ - Endpoints: []string{"https://localhost:8529"}, - TLSConfig: &tls.Config{InsecureSkipVerify: true}, -}) -if err != nil { - // Handle error -} -c, err := driver.NewClient(driver.ClientConfig{ - Connection: conn, -}) -if err != nil { - // Handle error -} -``` diff --git a/Documentation/Books/Drivers/GO/ExampleRequests/README.md b/Documentation/Books/Drivers/GO/ExampleRequests/README.md deleted file mode 100644 index 9bc9909658dc..000000000000 --- a/Documentation/Books/Drivers/GO/ExampleRequests/README.md +++ /dev/null @@ -1,183 +0,0 @@ - -# ArangoDB GO Driver - Example requests - -## Connecting to ArangoDB - -```go -conn, err := http.NewConnection(http.ConnectionConfig{ - Endpoints: []string{"http://localhost:8529"}, - TLSConfig: &tls.Config{ /*...*/ }, -}) -if err != nil { - // Handle error -} -c, err := driver.NewClient(driver.ClientConfig{ - Connection: conn, - Authentication: driver.BasicAuthentication("user", "password"), -}) -if err != nil { - // Handle error -} -``` - -## Opening a database - -```go -ctx := context.Background() -db, err := client.Database(ctx, "myDB") -if err != nil { - // handle error -} -``` - -## Opening a collection - -```go -ctx := context.Background() -col, err := db.Collection(ctx, "myCollection") -if err != nil { - // handle error -} -``` - -## Checking if a collection exists - -```go -ctx := context.Background() -found, err := db.CollectionExists(ctx, "myCollection") -if err != nil { - // handle error -} -``` - -## Creating a collection - -```go -ctx := context.Background() -options := &driver.CreateCollectionOptions{ /* ... */ } -col, err := db.CreateCollection(ctx, "myCollection", options) -if err != nil { - // handle error -} -``` - -## Reading a document from a collection - -```go -var doc MyDocument -ctx := context.Background() -meta, err := col.ReadDocument(ctx, myDocumentKey, &doc) -if err != nil { - // handle error -} -``` - -## Reading a document from a collection with an explicit revision - -```go -var doc MyDocument -revCtx := driver.WithRevision(ctx, "mySpecificRevision") -meta, err := col.ReadDocument(revCtx, myDocumentKey, &doc) -if err != nil { - // handle error -} -``` - -## Creating a document - -```go -doc := MyDocument{ - Name: "jan", - Counter: 23, -} -ctx := context.Background() -meta, err := col.CreateDocument(ctx, doc) -if err != nil { - // handle error -} -fmt.Printf("Created document with key '%s', revision '%s'\n", meta.Key, meta.Rev) -``` - -## Removing a document - -```go -ctx := context.Background() -err := col.RemoveDocument(revCtx, myDocumentKey) -if err != nil { - // handle error -} -``` - -## Removing a document with an explicit revision - -```go -revCtx := driver.WithRevision(ctx, "mySpecificRevision") -err := col.RemoveDocument(revCtx, myDocumentKey) -if err != nil { - // handle error -} -``` - -## Updating a document - -```go -ctx := context.Background() -patch := map[string]interface{}{ - "Name": "Frank", -} -meta, err := col.UpdateDocument(ctx, myDocumentKey, patch) -if err != nil { - // handle error -} -``` - -## Querying documents, one document at a time - -```go -ctx := context.Background() -query := "FOR d IN myCollection LIMIT 10 RETURN d" -cursor, err := db.Query(ctx, query, nil) -if err != nil { - // handle error -} -defer cursor.Close() -for { - var doc MyDocument - meta, err := cursor.ReadDocument(ctx, &doc) - if 
driver.IsNoMoreDocuments(err) { - break - } else if err != nil { - // handle other errors - } - fmt.Printf("Got doc with key '%s' from query\n", meta.Key) -} -``` - -## Querying documents, fetching total count - -```go -ctx := driver.WithQueryCount(context.Background()) -query := "FOR d IN myCollection RETURN d" -cursor, err := db.Query(ctx, query, nil) -if err != nil { - // handle error -} -defer cursor.Close() -fmt.Printf("Query yields %d documents\n", cursor.Count()) -``` - -## Querying documents, with bind variables - -```go -ctx := context.Background() -query := "FOR d IN myCollection FILTER d.Name == @name RETURN d" -bindVars := map[string]interface{}{ - "name": "Some name", -} -cursor, err := db.Query(ctx, query, bindVars) -if err != nil { - // handle error -} -defer cursor.Close() -... -``` diff --git a/Documentation/Books/Drivers/GO/GettingStarted/README.md b/Documentation/Books/Drivers/GO/GettingStarted/README.md deleted file mode 100644 index cfc6ec990d65..000000000000 --- a/Documentation/Books/Drivers/GO/GettingStarted/README.md +++ /dev/null @@ -1,142 +0,0 @@ - -# ArangoDB GO Driver - Getting Started - -## Supported versions - -- ArangoDB versions 3.1 and up. - - Single server & cluster setups - - With or without authentication -- Go 1.7 and up. - -## Go dependencies - -- None (Additional error libraries are supported). - -## Configuration - -To use the driver, first fetch the sources into your GOPATH. - -```sh -go get github.com/arangodb/go-driver -``` - -Using the driver, you always need to create a `Client`. -The following example shows how to create a `Client` for a single server -running on localhost. - -```go -import ( - "fmt" - - driver "github.com/arangodb/go-driver" - "github.com/arangodb/go-driver/http" -) - -... - -conn, err := http.NewConnection(http.ConnectionConfig{ - Endpoints: []string{"http://localhost:8529"}, -}) -if err != nil { - // Handle error -} -c, err := driver.NewClient(driver.ClientConfig{ - Connection: conn, -}) -if err != nil { - // Handle error -} -``` - -Once you have a `Client` you can access/create databases on the server, -access/create collections, graphs, documents and so on. - -The following example shows how to open an existing collection in an existing database -and create a new document in that collection. - -```go -// Open "examples_books" database -db, err := c.Database(nil, "examples_books") -if err != nil { - // Handle error -} - -// Open "books" collection -col, err := db.Collection(nil, "books") -if err != nil { - // Handle error -} - -// Create document -book := Book{ - Title: "ArangoDB Cookbook", - NoPages: 257, -} -meta, err := col.CreateDocument(nil, book) -if err != nil { - // Handle error -} -fmt.Printf("Created document in collection '%s' in database '%s'\n", col.Name(), db.Name()) -``` - -## API design - -### Concurrency - -All functions of the driver are stricly synchronous. They operate and only return a value (or error) -when they're done. - -If you want to run operations concurrently, use a go routine. All objects in the driver are designed -to be used from multiple concurrent go routines, except `Cursor`. - -All database objects (except `Cursor`) are considered static. After their creation they won't change. -E.g. after creating a `Collection` instance you can remove the collection, but the (Go) instance -will still be there. Calling functions on such a removed collection will of course fail. - -### Structured error handling & wrapping - -All functions of the driver that can fail return an `error` value. 
If that value is not `nil`, the -function call is considered to be failed. In that case all other return values are set to their `zero` -values. - -All errors are structured using error checking functions named `Is`. -E.g. `IsNotFound(error)` return true if the given error is of the category "not found". -There can be multiple internal error codes that all map onto the same category. - -All errors returned from any function of the driver (either internal or exposed) wrap errors -using the `WithStack` function. This can be used to provide detail stack trackes in case of an error. -All error checking functions use the `Cause` function to get the cause of an error instead of the error wrapper. - -Note that `WithStack` and `Cause` are actually variables to you can implement it using your own error -wrapper library. - -If you for example use https://github.com/pkg/errors, you want to initialize to go driver like this: -```go -import ( - driver "github.com/arangodb/go-driver" - "github.com/pkg/errors" -) - -func init() { - driver.WithStack = errors.WithStack - driver.Cause = errors.Cause -} -``` - -### Context aware - -All functions of the driver that involve some kind of long running operation or -support additional options not given as function arguments, have a `context.Context` argument. -This enables you cancel running requests, pass timeouts/deadlines and pass additional options. - -In all methods that take a `context.Context` argument you can pass `nil` as value. -This is equivalent to passing `context.Background()`. - -Many functions support 1 or more optional (and infrequently used) additional options. -These can be used with a `With` function. -E.g. to force a create document call to wait until the data is synchronized to disk, -use a prepared context like this: -```go -ctx := driver.WithWaitForSync(parentContext) -collection.CreateDocument(ctx, yourDocument) -``` diff --git a/Documentation/Books/Drivers/GO/README.md b/Documentation/Books/Drivers/GO/README.md deleted file mode 100644 index 4701bd6c6e29..000000000000 --- a/Documentation/Books/Drivers/GO/README.md +++ /dev/null @@ -1,9 +0,0 @@ - -# ArangoDB GO Driver - -The official [ArangoDB](https://arangodb.com) GO Driver - -- [Getting Started](GettingStarted/README.md) -- [Example Requests](ExampleRequests/README.md) -- [Connection Management](ConnectionManagement/README.md) -- [Reference](https://godoc.org/github.com/arangodb/go-driver) diff --git a/Documentation/Books/Drivers/JS/GettingStarted/README.md b/Documentation/Books/Drivers/JS/GettingStarted/README.md deleted file mode 100644 index d8fb21cef3ba..000000000000 --- a/Documentation/Books/Drivers/JS/GettingStarted/README.md +++ /dev/null @@ -1,234 +0,0 @@ - -# ArangoDB JavaScript Driver - Getting Started - -## Compatibility - -ArangoJS is compatible with the latest stable version of ArangoDB available at -the time of the driver release. - -The [_arangoVersion_ option](../Reference/Database/README.md) -can be used to tell arangojs to target a specific -ArangoDB version. Depending on the version this will enable or disable certain -methods and change behavior to maintain compatibility with the given version. -The oldest version of ArangoDB supported by arangojs when using this option -is 2.8.0 (using `arangoVersion: 20800`). - -The yarn/npm distribution of ArangoJS is compatible with Node.js versions 9.x -(latest), 8.x (LTS) and 6.x (LTS). Node.js version support follows -[the official Node.js long-term support schedule](https://github.com/nodejs/LTS). 
- -The included browser build is compatible with Internet Explorer 11 and recent -versions of all modern browsers (Edge, Chrome, Firefox and Safari). - -Versions outside this range may be compatible but are not actively supported. - -**Note**: Starting with arangojs 6.0.0, all asynchronous functions return -promises. If you are using a version of Node.js older than Node.js 6.x LTS -("Boron") make sure you replace the native `Promise` implementation with a -substitute like [bluebird](https://github.com/petkaantonov/bluebird) -to avoid a known memory leak in older versions of the V8 JavaScript engine. - -## Versions - -The version number of this driver does not indicate supported ArangoDB versions! - -This driver uses semantic versioning: - -- A change in the bugfix version (e.g. X.Y.0 -> X.Y.1) indicates internal - changes and should always be safe to upgrade. -- A change in the minor version (e.g. X.1.Z -> X.2.0) indicates additions and - backwards-compatible changes that should not affect your code. -- A change in the major version (e.g. 1.Y.Z -> 2.0.0) indicates _breaking_ - changes that require changes in your code to upgrade. - -If you are getting weird errors or functions seem to be missing, make sure you -are using the latest version of the driver and following documentation written -for a compatible version. If you are following a tutorial written for an older -version of arangojs, you can install that version using the `@` -syntax: - -```sh -# for version 4.x.x -yarn add arangojs@4 -# - or - -npm install --save arangojs@4 -``` - -You can find the documentation for each version by clicking on the corresponding -date on the left in -[the list of version tags](https://github.com/arangodb/arangojs/tags). - -## Install - -### With Yarn or NPM - -```sh -yarn add arangojs -# - or - -npm install --save arangojs -``` - -### With Bower - -Starting with arangojs 6.0.0 Bower is no longer supported and the browser -build is now included in the NPM release (see below). - -### From source - -```sh -git clone https://github.com/arangodb/arangojs.git -cd arangojs -npm install -npm run dist -``` - -### For browsers - -For production use arangojs can be installed with Yarn or NPM like any -other dependency. Just use arangojs like you would in your server code: - -```js -import { Database } from "arangojs"; -// -- or -- -var arangojs = require("arangojs"); -``` - -Additionally the NPM release comes with a precompiled browser build: - -```js -var arangojs = require("arangojs/lib/web"); -``` - -You can also use [unpkg](https://unpkg.com) during development: - -```html -< !-- note the path includes the version number (e.g. 6.0.0) -- > - - -``` - -If you are targetting browsers older than Internet Explorer 11 you may want to -use [babel](https://babeljs.io) with a -[polyfill](https://babeljs.io/docs/usage/polyfill) to provide missing -functionality needed to use arangojs. - -When loading the browser build with a script tag make sure to load the polyfill first: - -```html - - -``` - -## Basic usage example - -```js -// Modern JavaScript -import { Database, aql } from "arangojs"; -const db = new Database(); -(async function() { - const now = Date.now(); - try { - const cursor = await db.query(aql` - RETURN ${now} - `); - const result = await cursor.next(); - // ... - } catch (err) { - // ... 
- } -})(); - -// or plain old Node-style -var arangojs = require("arangojs"); -var db = new arangojs.Database(); -var now = Date.now(); -db.query({ - query: "RETURN @value", - bindVars: { value: now } -}) - .then(function(cursor) { - return cursor.next().then(function(result) { - // ... - }); - }) - .catch(function(err) { - // ... - }); - -// Using different databases -const db = new Database({ - url: "http://localhost:8529" -}); -db.useDatabase("pancakes"); -db.useBasicAuth("root", ""); -// The database can be swapped at any time -db.useDatabase("waffles"); -db.useBasicAuth("admin", "maplesyrup"); - -// Using ArangoDB behind a reverse proxy -const db = new Database({ - url: "http://myproxy.local:8000", - isAbsolute: true // don't automatically append database path to URL -}); - -// Trigger ArangoDB 2.8 compatibility mode -const db = new Database({ - arangoVersion: 20800 -}); -``` - -For AQL please check out the [aql template tag](../Reference/Database/Queries.md#aql) for writing parametrized -AQL queries without making your code vulnerable to injection attacks. - -## Error responses - -If arangojs encounters an API error, it will throw an _ArangoError_ with an -[_errorNum_ error code](../../..//Manual/Appendix/ErrorCodes.html) -as well as a _code_ and _statusCode_ property indicating the intended and -actual HTTP status code of the response. - -For any other error responses (4xx/5xx status code), it will throw an -_HttpError_ error with the status code indicated by the _code_ and _statusCode_ properties. - -If the server response did not indicate an error but the response body could -not be parsed, a _SyntaxError_ may be thrown instead. - -In all of these cases the error object will additionally have a _response_ -property containing the server response object. - -If the request failed at a network level or the connection was closed without -receiving a response, the underlying error will be thrown instead. - -**Examples** - -```js -// Using async/await -try { - const info = await db.createDatabase("mydb"); - // database created -} catch (err) { - console.error(err.stack); -} - -// Using promises with arrow functions -db.createDatabase("mydb").then( - info => { - // database created - }, - err => console.error(err.stack) -); -``` - -{% hint 'tip' %} -The examples in the remainder of this documentation use `async`/`await` -and other modern language features like multi-line strings and template tags. -When developing for an environment without support for these language features, -substitute promises for `await` syntax as in the above example. -{% endhint %} diff --git a/Documentation/Books/Drivers/JS/README.md b/Documentation/Books/Drivers/JS/README.md deleted file mode 100644 index bb12b7aa50be..000000000000 --- a/Documentation/Books/Drivers/JS/README.md +++ /dev/null @@ -1,16 +0,0 @@ - -# ArangoDB JavaScript Driver - -The official ArangoDB low-level JavaScript client. - -**Note:** if you are looking for the ArangoDB JavaScript API in -[Foxx](https://foxx.arangodb.com) (or the `arangosh` interactive shell) please -refer to the documentation about the -[`@arangodb` module](../..//Manual/Foxx/Reference/Modules/index.html#the-arangodb-module) -instead; specifically the `db` object exported by the `@arangodb` module. The -JavaScript driver is **only** meant to be used when accessing ArangoDB from -**outside** the database. 
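The following minimal sketch illustrates that distinction; it assumes a collection named `users` exists, and the two halves are meant to run in different environments (the first inside ArangoDB itself, the second in an external Node.js process):

```js
// Inside ArangoDB (arangosh or a Foxx service): use the bundled @arangodb module
const { db } = require("@arangodb");
const internalDoc = db._collection("users").document("some-key");

// Outside ArangoDB (e.g. a Node.js application): use this driver instead
const { Database } = require("arangojs");
const remoteDb = new Database({ url: "http://localhost:8529" });
remoteDb
  .collection("users")
  .document("some-key")
  .then(doc => {
    // doc was fetched over HTTP via the ArangoDB REST API
  });
```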
- -- [Getting Started](GettingStarted/README.md) -- [Reference](Reference/README.md) -- [Changelog](https://github.com/arangodb/arangojs/blob/master/CHANGELOG.md#readme) diff --git a/Documentation/Books/Drivers/JS/Reference/Aql.md b/Documentation/Books/Drivers/JS/Reference/Aql.md deleted file mode 100644 index 313c8c4ee088..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Aql.md +++ /dev/null @@ -1,151 +0,0 @@ - -# AQL Helpers - -These helpers are available via the `aql` export from the arangojs module: - -```js -import arangojs, { aql } from "arangojs"; - -// or CommonJS: - -const arangojs = require("arangojs"); -const aql = arangojs.aql; -``` - -## aql - -`aql: AqlQuery` - -The `aql` function is a JavaScript template string handler (or template tag). -It can be used to write complex AQL queries as multi-line strings without -having to worry about bindVars and the distinction between collections -and regular parameters. - -To use it just prefix a JavaScript template string (the ones with backticks -instead of quotes) with its import name (e.g. `aql`) and pass in variables -like you would with a regular template string. The string will automatically -be converted into an object with `query` and `bindVars` attributes which you -can pass directly to `db.query` to execute. If you pass in a collection it -will be automatically recognized as a collection reference -and handled accordingly. - -The `aql` template tag can also be used inside other `aql` template strings, -allowing arbitrary nesting. Bind parameters of nested queries will be merged -automatically. - -**Examples** - -```js -const filterValue = 23; -const mydata = db.collection("mydata"); -const result = await db.query(aql` - FOR d IN ${mydata} - FILTER d.num > ${filterValue} - RETURN d -`); - -// nested queries - -const color = "green"; -const filterByColor = aql`FILTER d.color == ${color}'`; -const result2 = await db.query(aql` - FOR d IN ${mydata} - ${filterByColor} - RETURN d -`); -``` - -## aql.literal - -`aql.literal(value): AqlLiteral` - -The `aql.literal` helper can be used to mark strings to be inlined into an AQL -query when using the `aql` template tag, rather than being treated as a bind -parameter. - -{% hint 'danger' %} -Any value passed to `aql.literal` will be treated as part of the AQL query. -To avoid becoming vulnerable to AQL injection attacks you should always prefer -nested `aql` queries if possible. -{% endhint %} - -**Arguments** - -- **value**: `string` - - An arbitrary string that will be treated as a literal AQL fragment when used - in an `aql` template. - -**Examples** - -```js -const filterGreen = aql.literal('FILTER d.color == "green"'); -const result = await db.query(aql` - FOR d IN ${mydata} - ${filterGreen} - RETURN d -`); -``` - -## aql.join - -`aql.join(values)` - -The `aql.join` helper takes an array of queries generated using the `aql` tag -and combines them into a single query. The optional second argument will be -used as literal string to combine the queries. - -**Arguments** - -- **values**: `Array` - - An array of arbitrary values, typically AQL query objects or AQL literals. - -- **sep**: `string` (Default: `" "`) - - String that will be used in between the values. 
- -**Examples** - -```js -// Basic usage -const parts = [aql`FILTER`, aql`x`, aql`%`, aql`2`]; -const joined = aql.join(parts); // aql`FILTER x % 2` - -// Merge without the extra space -const parts = [aql`FIL`, aql`TER`]; -const joined = aql.join(parts, ""); // aql`FILTER`; - -// Real world example: translate keys into document lookups -const users = db.collection("users"); -const keys = ["abc123", "def456"]; -const docs = keys.map(key => aql`DOCUMENT(${users}, ${key})`); -const aqlArray = aql`[${aql.join(docs, ", ")}]`; -const result = await db.query(aql` - FOR d IN ${aqlArray} - RETURN d -`); -// Query: -// FOR d IN [DOCUMENT(@@value0, @value1), DOCUMENT(@@value0, @value2)] -// RETURN d -// Bind parameters: -// @value0: "users" -// value1: "abc123" -// value2: "def456" - -// Alternative without `aql.join` -const users = db.collection("users"); -const keys = ["abc123", "def456"]; -const result = await db.query(aql` - FOR key IN ${keys} - LET d = DOCUMENT(${users}, key) - RETURN d -`); -// Query: -// FOR key IN @value0 -// LET d = DOCUMENT(@@value1, key) -// RETURN d -// Bind parameters: -// value0: ["abc123", "def456"] -// @value1: "users" -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md b/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md deleted file mode 100644 index 460ad269fc46..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md +++ /dev/null @@ -1,152 +0,0 @@ - -# Bulk importing documents - -This function implements the -[HTTP API for bulk imports](../../../..//HTTP/BulkImports/index.html). - -## collection.import - -`async collection.import(data, [opts]): Object` - -Bulk imports the given _data_ into the collection. - -**Arguments** - -- **data**: `Array | Buffer | string` - - The data to import. Depending on the _type_ option this can be any of the - following: - - For type `"documents"` or `"auto"`: - - - an array of documents, e.g. - - ```json - [ - { "_key": "banana", "color": "yellow" }, - { "_key": "peach", "color": "pink" } - ] - ``` - - - a string or buffer containing one JSON document per line, e.g. - - ``` - {"_key":"banana","color":"yellow"} - {"_key":"peach","color":"pink"} - ``` - - For type `"array"` or `"auto"`: - - - a string or buffer containing a JSON array of documents, e.g. - - ```json - [ - { "_key": "banana", "color": "yellow" }, - { "_key": "peach", "color": "pink" } - ] - ``` - - For type `null`: - - - an array containing an array of keys followed by arrays of values, e.g. - - ``` - [ - ["_key", "color"], - ["banana", "yellow"], - ["peach", "pink"] - ] - ``` - - - a string or buffer containing a JSON array of keys followed by - one JSON array of values per line, e.g. - - ``` - ["_key", "color"] - ["banana", "yellow"] - ["peach", "pink"] - ``` - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **type**: `string | null` (Default: `"auto"`) - - Indicates which format the data uses. - Can be `"documents"`, `"array"` or `"auto"`. - Use `null` to explicitly set no type. - - - **fromPrefix**: `string` (optional) - - Prefix to prepend to `_from` attributes. - - - **toPrefix**: `string` (optional) - - Prefix to prepend to `_to` attributes. - - - **overwrite**: `boolean` (Default: `false`) - - If set to `true`, the collection is truncated before the data is imported. - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until the documents have been synced to disk. 
- - - **onDuplicate**: `string` (Default: `"error"`) - - Controls behavior when a unique constraint is violated. - Can be `"error"`, `"update"`, `"replace"` or `"ignore"`. - - - **complete**: `boolean` (Default: `false`) - - If set to `true`, the import will abort if any error occurs. - - - **details**: `boolean` (Default: `false`) - - Whether the response should contain additional details about documents that - could not be imported. - -For more information on the _opts_ object, see the -[HTTP API documentation for bulk imports](../../../..//HTTP/BulkImports/index.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("users"); - -const result = await collection.import( - [ - { username: "jcd", password: "bionicman" }, - { username: "jreyes", password: "amigo" }, - { username: "ghermann", password: "zeitgeist" } - ], - { type: "documents" } // optional -); - -// -- or -- - -const buf = fs.readFileSync("dx_users.json"); -// [ -// {"username": "jcd", "password": "bionicman"}, -// {"username": "jreyes", "password": "amigo"}, -// {"username": "ghermann", "password": "zeitgeist"} -// ] -const result = await collection.import( - buf, - { type: "array" } // optional -); - -// -- or -- - -const result = await collection.import( - [ - ["username", "password"], - ["jcd", "bionicman"], - ["jreyes", "amigo"], - ["ghermann", "zeitgeist"] - ], - { type: null } // required -); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md deleted file mode 100644 index eb6d4aa1c159..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md +++ /dev/null @@ -1,175 +0,0 @@ - -# Manipulating the collection - -These functions implement the -[HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html). - -## collection.create - -`async collection.create([properties]): Object` - -Creates a collection with the given _properties_ for this collection's name, -then returns the server response. - -**Arguments** - -- **properties**: `Object` (optional) - - For more information on the _properties_ object, see the - [HTTP API documentation for creating collections](../../../..//HTTP/Collection/Creating.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('potatoes'); -await collection.create() -// the document collection "potatoes" now exists - -// -- or -- - -const collection = db.edgeCollection('friends'); -await collection.create({ - waitForSync: true // always sync document changes to disk -}); -// the edge collection "friends" now exists -``` - -## collection.load - -`async collection.load([count]): Object` - -Tells the server to load the collection into memory. - -**Arguments** - -- **count**: `boolean` (Default: `true`) - - If set to `false`, the return value will not include the number of documents - in the collection (which may speed up the process). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.load(false) -// the collection has now been loaded into memory -``` - -## collection.unload - -`async collection.unload(): Object` - -Tells the server to remove the collection from memory. 
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.unload() -// the collection has now been unloaded from memory -``` - -## collection.setProperties - -`async collection.setProperties(properties): Object` - -Replaces the properties of the collection. - -**Arguments** - -- **properties**: `Object` - - For information on the _properties_ argument see the - [HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const result = await collection.setProperties({waitForSync: true}) -assert.equal(result.waitForSync, true); -// the collection will now wait for data being written to disk -// whenever a document is changed -``` - -## collection.rename - -`async collection.rename(name): Object` - -Renames the collection. The _Collection_ instance will automatically update its -name when the rename succeeds. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const result = await collection.rename('new-collection-name') -assert.equal(result.name, 'new-collection-name'); -assert.equal(collection.name, result.name); -// result contains additional information about the collection -``` - -## collection.rotate - -`async collection.rotate(): Object` - -Rotates the journal of the collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.rotate(); -// data.result will be true if rotation succeeded -``` - -## collection.truncate - -`async collection.truncate(): Object` - -Deletes **all documents** in the collection in the database. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.truncate(); -// the collection "some-collection" is now empty -``` - -## collection.drop - -`async collection.drop([properties]): Object` - -Deletes the collection from the database. - -**Arguments** - -- **properties**: `Object` (optional) - - An object with the following properties: - - - **isSystem**: `Boolean` (Default: `false`) - - Whether the collection should be dropped even if it is a system collection. - - This parameter must be set to `true` when dropping a system collection. - - For more information on the _properties_ object, see the - [HTTP API documentation for dropping collections](../../../..//HTTP/Collection/Creating.html#drops-a-collection). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.drop(); -// the collection "some-collection" no longer exists -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md deleted file mode 100644 index 2d690f5a7f6a..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md +++ /dev/null @@ -1,185 +0,0 @@ - -# DocumentCollection API - -The _DocumentCollection API_ extends the -[_Collection API_](README.md) with the following methods. - -## documentCollection.document - -`async documentCollection.document(documentHandle, [opts]): Document` - -Retrieves the document with the given _documentHandle_ from the collection. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve. 
This can be either the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **graceful**: `boolean` (Default: `false`) - - If set to `true`, the method will return `null` instead of throwing an - error if the document does not exist. - - - **allowDirtyRead**: `boolean` (Default: `false`) - - {% hint 'info' %} - This option is only available when targeting ArangoDB 3.4 or later, - see [Compatibility](../../GettingStarted/README.md#compatibility). - {% endhint %} - - If set to `true`, the request will explicitly permit ArangoDB to return a - potentially dirty or stale result and arangojs will load balance the - request without distinguishing between leaders and followers. - -If a boolean is passed instead of an options object, it will be interpreted as -the _graceful_ option. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("my-docs"); - -try { - const doc = await collection.document("some-key"); - // the document exists - assert.equal(doc._key, "some-key"); - assert.equal(doc._id, "my-docs/some-key"); -} catch (err) { - // something went wrong or - // the document does not exist -} - -// -- or -- - -try { - const doc = await collection.document("my-docs/some-key"); - // the document exists - assert.equal(doc._key, "some-key"); - assert.equal(doc._id, "my-docs/some-key"); -} catch (err) { - // something went wrong or - // the document does not exist -} - -// -- or -- - -const doc = await collection.document("some-key", true); -if (doc === null) { - // the document does not exist -} -``` - -## documentCollection.documentExists - -`async documentCollection.documentExists(documentHandle): boolean` - -Checks whether the document with the given _documentHandle_ exists. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve. This can be either the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("my-docs"); - -const exists = await collection.documentExists("some-key"); -if (exists === false) { - // the document does not exist -} -``` - -## documentCollection.save - -`async documentCollection.save(data, [opts]): Object` - -Creates a new document with the given _data_ and returns an object containing -the document's metadata (`_id`, `_key` and `_rev` attributes). - -Multiple documents can be created in a single call by passing an array of -objects as argument for _data_. The result will be an array too, of which -some elements can be error objects if the documents couldn't be saved. - -**Arguments** - -- **data**: `Object | Object[]` - - The data of the new document, may include a `_key`. - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - - **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new document(s) under the - attribute `new` in the result. - - - **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete old document(s) under the - attribute `old` in the result. 
- - - **silent**: `boolean` (Default: `false`) - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - - - **overwrite**: `boolean` (Default: `false`) - - {% hint 'warning' %} - This option is only available when targeting ArangoDB v3.4.0 and later. - {% endhint %} - - If set to true, the insert becomes a replace-insert. If a document with the - same \_key already exists the new document is not rejected with unique - constraint violated but will replace the old document. - -If a boolean is passed instead of an options object, it will be interpreted as -the _returnNew_ option. - -For more information on the _opts_ object, see the -[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("my-docs"); -const data = { some: "data" }; -const info = await collection.save(data); -assert.equal(info._id, "my-docs/" + info._key); -const doc2 = await collection.document(info); -assert.equal(doc2._id, info._id); -assert.equal(doc2._rev, info._rev); -assert.equal(doc2.some, data.some); - -// -- or -- - -const db = new Database(); -const collection = db.collection("my-docs"); -const data = { some: "data" }; -const opts = { returnNew: true }; -const doc = await collection.save(data, opts); -assert.equal(doc1._id, "my-docs/" + doc1._key); -assert.equal(doc1.new.some, data.some); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md deleted file mode 100644 index d12220792cbc..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md +++ /dev/null @@ -1,318 +0,0 @@ - -# Manipulating documents - -These functions implement the -[HTTP API for manipulating documents](../../../..//HTTP/Document/index.html). - -## collection.replace - -`async collection.replace(documentHandle, newValue, [opts]): Object` - -Replaces the content of the document with the given _documentHandle_ with the -given _newValue_ and returns an object containing the document's metadata. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to replace. This can either be the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -- **newValue**: `Object` - - The new data of the document. - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until the document has been synced to disk. Default: `false`. - - - **rev**: `string` (optional) - - Only replace the document if it matches this revision. - - - **policy**: `string` (optional) - - {% hint 'warning' %} - This option has no effect in ArangoDB 3.0 and later. - {% endhint %} - - Determines the behavior when the revision is not matched: - - - if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - - if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. 
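A minimal sketch of that string shorthand (the document and its revision are assumed to come from an earlier `save` call):

```js
const db = new Database();
const collection = db.collection("some-collection");
const info = await collection.save({ number: 1 });
// passing the revision string is shorthand for { rev: info._rev }
const updated = await collection.replace(info._key, { number: 2 }, info._rev);
// replacing again with the now outdated revision is expected to fail
try {
  await collection.replace(info._key, { number: 3 }, info._rev);
} catch (err) {
  // the revision no longer matches
}
```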
- -For more information on the _opts_ object, see the -[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const data = { number: 1, hello: "world" }; -const info1 = await collection.save(data); -const info2 = await collection.replace(info1, { number: 2 }); -assert.equal(info2._id, info1._id); -assert.notEqual(info2._rev, info1._rev); -const doc = await collection.document(info1); -assert.equal(doc._id, info1._id); -assert.equal(doc._rev, info2._rev); -assert.equal(doc.number, 2); -assert.equal(doc.hello, undefined); -``` - -## collection.update - -`async collection.update(documentHandle, newValue, [opts]): Object` - -Updates (merges) the content of the document with the given _documentHandle_ -with the given _newValue_ and returns an object containing the document's -metadata. - -**Arguments** - -- **documentHandle**: `string` - - Handle of the document to update. This can be either the `_id` or the `_key` - of a document in the collection, or a document (i.e. an object with an `_id` - or `_key` property). - -- **newValue**: `Object` - - The new data of the document. - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - - **keepNull**: `boolean` (Default: `true`) - - If set to `false`, properties with a value of `null` indicate that a - property should be deleted. - - - **mergeObjects**: `boolean` (Default: `true`) - - If set to `false`, object properties that already exist in the old document - will be overwritten rather than merged. This does not affect arrays. - - - **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete previous revision of the - changed documents under the attribute `old` in the result. - - - **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - - **ignoreRevs**: `boolean` (Default: `true`) - - By default, or if this is set to true, the `_rev` attributes in the given - documents are ignored. If this is set to false, then any `_rev` attribute - given in a body document is taken as a precondition. The document is only - updated if the current revision is the one specified. - - - **rev**: `string` (optional) - - Only update the document if it matches this revision. - - - **policy**: `string` (optional) - - {% hint 'warning' %} - This option has no effect in ArangoDB 3.0 and later. - {% endhint %} - - Determines the behavior when the revision is not matched: - - - if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - - if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. - -For more information on the _opts_ object, see the -[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html). 
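In particular, `keepNull: false` can be used to remove an attribute by setting it to `null`; a minimal sketch (the attribute names are assumed):

```js
const db = new Database();
const collection = db.collection("some-collection");
const info = await collection.save({ number: 1, hello: "world" });
// setting "hello" to null together with keepNull: false removes the attribute
await collection.update(info, { hello: null }, { keepNull: false });
const doc = await collection.document(info);
// doc.number is still 1; doc.hello should no longer be present
```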
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const doc = { number: 1, hello: "world" }; -const doc1 = await collection.save(doc); -const doc2 = await collection.update(doc1, { number: 2 }); -assert.equal(doc2._id, doc1._id); -assert.notEqual(doc2._rev, doc1._rev); -const doc3 = await collection.document(doc2); -assert.equal(doc3._id, doc2._id); -assert.equal(doc3._rev, doc2._rev); -assert.equal(doc3.number, 2); -assert.equal(doc3.hello, doc.hello); -``` - -## collection.bulkUpdate - -`async collection.bulkUpdate(documents, [opts]): Object` - -Updates (merges) the content of the documents with the given _documents_ and -returns an array containing the documents' metadata. - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.0 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -**Arguments** - -- **documents**: `Array` - - Documents to update. Each object must have either the `_id` or the `_key` - property. - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - - **keepNull**: `boolean` (Default: `true`) - - If set to `false`, properties with a value of `null` indicate that a - property should be deleted. - - - **mergeObjects**: `boolean` (Default: `true`) - - If set to `false`, object properties that already exist in the old document - will be overwritten rather than merged. This does not affect arrays. - - - **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete previous revision of the - changed documents under the attribute `old` in the result. - - - **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - - **ignoreRevs**: `boolean` (Default: `true`) - - By default, or if this is set to true, the `_rev` attributes in the given - documents are ignored. If this is set to false, then any `_rev` attribute - given in a body document is taken as a precondition. The document is only - updated if the current revision is the one specified. - -For more information on the _opts_ object, see the -[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const doc1 = { number: 1, hello: "world1" }; -const info1 = await collection.save(doc1); -const doc2 = { number: 2, hello: "world2" }; -const info2 = await collection.save(doc2); -const result = await collection.bulkUpdate( - [{ _key: info1._key, number: 3 }, { _key: info2._key, number: 4 }], - { returnNew: true } -); -``` - -## collection.remove - -`async collection.remove(documentHandle, [opts]): Object` - -Deletes the document with the given _documentHandle_ from the collection. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to delete. This can be either the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. 
- - - **rev**: `string` (optional) - - Only update the document if it matches this revision. - - - **policy**: `string` (optional) - - {% hint 'warning' %} - This option has no effect in ArangoDB 3.0 and later. - {% endhint %} - - Determines the behavior when the revision is not matched: - - - if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - - if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. - -For more information on the _opts_ object, see the -[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -await collection.remove("some-doc"); -// document 'some-collection/some-doc' no longer exists - -// -- or -- - -await collection.remove("some-collection/some-doc"); -// document 'some-collection/some-doc' no longer exists -``` - -## collection.list - -`async collection.list([type]): Array` - -Retrieves a list of references for all documents in the collection. - -**Arguments** - -- **type**: `string` (Default: `"id"`) - - The format of the document references: - - - if _type_ is set to `"id"`, each reference will be the `_id` of the - document. - - if _type_ is set to `"key"`, each reference will be the `_key` of the - document. - - if _type_ is set to `"path"`, each reference will be the URI path of the - document. diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md deleted file mode 100644 index 26dc648ca5cd..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md +++ /dev/null @@ -1,323 +0,0 @@ - -# EdgeCollection API - -The _EdgeCollection API_ extends the -[_Collection API_](README.md) with the following methods. - -## edgeCollection.document - -`async edgeCollection.document(documentHandle, [opts]): Edge` - -Alias: `edgeCollection.edge`. - -Retrieves the edge with the given _documentHandle_ from the collection. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the edge to retrieve. This can be either the `_id` or the `_key` - of an edge in the collection, or an edge (i.e. an object with an `_id` or - `_key` property). - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **graceful**: `boolean` (Default: `false`) - - If set to `true`, the method will return `null` instead of throwing an - error if the edge does not exist. - - - **allowDirtyRead**: `boolean` (Default: `false`) - - {% hint 'info' %} - This option is only available when targeting ArangoDB 3.4 or later, - see [Compatibility](../../GettingStarted/README.md#compatibility). - {% endhint %} - - If set to `true`, the request will explicitly permit ArangoDB to return a - potentially dirty or stale result and arangojs will load balance the - request without distinguishing between leaders and followers. - -If a boolean is passed instead of an options object, it will be interpreted as -the _graceful_ option. 
- -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); - -const edge = await collection.document("some-key"); -// the edge exists -assert.equal(edge._key, "some-key"); -assert.equal(edge._id, "edges/some-key"); - -// -- or -- - -const edge = await collection.document("edges/some-key"); -// the edge exists -assert.equal(edge._key, "some-key"); -assert.equal(edge._id, "edges/some-key"); - -// -- or -- - -const edge = await collection.document("some-key", true); -if (edge === null) { - // the edge does not exist -} -``` - -## edgeCollection.documentExists - -`async edgeCollection.documentExists(documentHandle): boolean` - -Checks whether the edge with the given _documentHandle_ exists. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the edge to retrieve. This can be either the `_id` or the - `_key` of a edge in the collection, or an edge (i.e. an object with an - `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("my-docs"); - -const exists = await collection.documentExists("some-key"); -if (exists === false) { - // the edge does not exist -} -``` - -## edgeCollection.save - -`async edgeCollection.save(data, [fromId, toId], [opts]): Object` - -Creates a new edge between the documents _fromId_ and _toId_ with the given -_data_ and returns an object containing the edge's metadata. - -**Arguments** - -- **data**: `Object` - - The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_ - needs to contain the properties `_from` and `_to`. - -- **fromId**: `string` (optional) - - The handle of the start vertex of this edge. This can be either the `_id` of a - document in the database, the `_key` of an edge in the collection, or a - document (i.e. an object with an `_id` or `_key` property). - -- **toId**: `string` (optional) - - The handle of the end vertex of this edge. This can be either the `_id` of a - document in the database, the `_key` of an edge in the collection, or a - document (i.e. an object with an `_id` or `_key` property). - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - - **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - - **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete old documents under the - attribute `old` in the result. - - - **silent**: `boolean` (Default: `false`) - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - - - **overwrite**: `boolean` (Default: `false`) - - If set to true, the insert becomes a replace-insert. If a document with the - same \_key already exists the new document is not rejected with unique - constraint violated but will replace the old document. - -If a boolean is passed instead of an options object, it will be interpreted as -the _returnNew_ option. 
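A minimal sketch of that boolean shorthand (the vertex handles are assumed to exist):

```js
const db = new Database();
const collection = db.edgeCollection("edges");
// passing `true` instead of an options object is shorthand for { returnNew: true }
const result = await collection.save(
  { some: "data" },
  "vertices/start-vertex",
  "vertices/end-vertex",
  true
);
// result.new should contain the complete new edge document
```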
- -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); -const data = { some: "data" }; - -const info = await collection.save( - data, - "vertices/start-vertex", - "vertices/end-vertex" -); -assert.equal(info._id, "edges/" + info._key); -const edge = await collection.edge(edge); -assert.equal(edge._key, info._key); -assert.equal(edge._rev, info._rev); -assert.equal(edge.some, data.some); -assert.equal(edge._from, "vertices/start-vertex"); -assert.equal(edge._to, "vertices/end-vertex"); - -// -- or -- - -const info = await collection.save({ - some: "data", - _from: "verticies/start-vertex", - _to: "vertices/end-vertex" -}); -// ... -``` - -## edgeCollection.edges - -`async edgeCollection.edges(documentHandle): Array` - -Retrieves a list of all edges of the document with the given _documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.edges("vertices/a"); -assert.equal(edges.length, 3); -assert.deepEqual(edges.map(edge => edge._key), ["x", "y", "z"]); -``` - -## edgeCollection.inEdges - -`async edgeCollection.inEdges(documentHandle): Array` - -Retrieves a list of all incoming edges of the document with the given -_documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.inEdges("vertices/a"); -assert.equal(edges.length, 1); -assert.equal(edges[0]._key, "z"); -``` - -## edgeCollection.outEdges - -`async edgeCollection.outEdges(documentHandle): Array` - -Retrieves a list of all outgoing edges of the document with the given -_documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.outEdges("vertices/a"); -assert.equal(edges.length, 2); -assert.deepEqual(edges.map(edge => edge._key), ["x", "y"]); -``` - -## edgeCollection.traversal - -`async edgeCollection.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in this edge collection. 
- -**Arguments** - -- **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the database, the `_key` of an edge in the collection, or a document (i.e. an - object with an `_id` or `_key` property). - -- **opts**: `Object` - - See the - [HTTP API documentation](../../../..//HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript code, it's not possible to pass in JavaScript functions directly - because the code needs to be evaluated on the server and will be transmitted - in plain text. - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/b", "vertices/c"], - ["z", "vertices/c", "vertices/d"] -]); -const result = await collection.traversal("vertices/a", { - direction: "outbound", - visitor: "result.vertices.push(vertex._key);", - init: "result.vertices = [];" -}); -assert.deepEqual(result.vertices, ["a", "b", "c", "d"]); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md b/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md deleted file mode 100644 index a7f0c52bb1ba..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md +++ /dev/null @@ -1,347 +0,0 @@ - -# Manipulating indexes - -These functions implement the -[HTTP API for manipulating indexes](../../../..//HTTP/Indexes/index.html). - -## collection.createIndex - -`async collection.createIndex(details): Object` - -Creates an arbitrary index on the collection. - -**Arguments** - -- **details**: `Object` - - For information on the possible properties of the _details_ object, see the - [HTTP API for manipulating indexes](../../../..//HTTP/Indexes/WorkingWith.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const index = await collection.createIndex({ - type: "hash", - fields: ["a", "a.b"] -}); -// the index has been created with the handle `index.id` -``` - -## collection.createHashIndex - -`async collection.createHashIndex(fields, [opts]): Object` - -Creates a hash index on the collection. - -**Arguments** - -- **fields**: `Array` - - An array of names of document fields on which to create the index. If the - value is a string, it will be wrapped in an array automatically. - -- **opts**: `Object` (optional) - - Additional options for this index. If the value is a boolean, it will be - interpreted as _opts.unique_. - -For more information on hash indexes, see the -[HTTP API for hash indexes](../../../..//HTTP/Indexes/Hash.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createHashIndex("favorite-color"); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["favorite-color"]); - -// -- or -- - -const index = await collection.createHashIndex(["favorite-color"]); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["favorite-color"]); -``` - -## collection.createSkipList - -`async collection.createSkipList(fields, [opts]): Object` - -Creates a skiplist index on the collection. 
- -**Arguments** - -- **fields**: `Array` - - An array of names of document fields on which to create the index. If the - value is a string, it will be wrapped in an array automatically. - -- **opts**: `Object` (optional) - - Additional options for this index. If the value is a boolean, it will be - interpreted as _opts.unique_. - -For more information on skiplist indexes, see the -[HTTP API for skiplist indexes](../../../..//HTTP/Indexes/Skiplist.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createSkipList("favorite-color"); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["favorite-color"]); - -// -- or -- - -const index = await collection.createSkipList(["favorite-color"]); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["favorite-color"]); -``` - -## collection.createGeoIndex - -`async collection.createGeoIndex(fields, [opts]): Object` - -Creates a geo-spatial index on the collection. - -**Arguments** - -- **fields**: `Array` - - An array of names of document fields on which to create the index. Currently, - geo indexes must cover exactly one field. If the value is a string, it will be - wrapped in an array automatically. - -- **opts**: `Object` (optional) - - An object containing additional properties of the index. - -For more information on the properties of the _opts_ object see the -[HTTP API for manipulating geo indexes](../../../..//HTTP/Indexes/Geo.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createGeoIndex(["latitude", "longitude"]); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["longitude", "latitude"]); - -// -- or -- - -const index = await collection.createGeoIndex("location", { geoJson: true }); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["location"]); -``` - -## collection.createFulltextIndex - -`async collection.createFulltextIndex(fields, [minLength]): Object` - -Creates a fulltext index on the collection. - -**Arguments** - -- **fields**: `Array` - - An array of names of document fields on which to create the index. Currently, - fulltext indexes must cover exactly one field. If the value is a string, it - will be wrapped in an array automatically. - -- **minLength** (optional): - - Minimum character length of words to index. Uses a server-specific default - value if not specified. - -For more information on fulltext indexes, see -[the HTTP API for fulltext indexes](../../../..//HTTP/Indexes/Fulltext.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createFulltextIndex("description"); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["description"]); - -// -- or -- - -const index = await collection.createFulltextIndex(["description"]); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["description"]); -``` - -## collection.createPersistentIndex - -`async collection.createPersistentIndex(fields, [opts]): Object` - -Creates a Persistent index on the collection. Persistent indexes are similarly -in operation to skiplist indexes, only that these indexes are in disk as opposed -to in memory. 
This reduces memory usage and DB startup time, with the trade-off -being that it will always be orders of magnitude slower than in-memory indexes. - -**Arguments** - -- **fields**: `Array` - - An array of names of document fields on which to create the index. - -- **opts**: `Object` (optional) - - An object containing additional properties of the index. - -For more information on the properties of the _opts_ object see -[the HTTP API for manipulating Persistent indexes](../../../..//HTTP/Indexes/Persistent.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createPersistentIndex(["name", "email"]); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ["name", "email"]); -``` - -## collection.index - -`async collection.index(indexHandle): Object` - -Fetches information about the index with the given _indexHandle_ and returns it. - -**Arguments** - -- **indexHandle**: `string` - - The handle of the index to look up. This can either be a fully-qualified - identifier or the collection-specific key of the index. If the value is an - object, its _id_ property will be used instead. Alternatively, the index - may be looked up by name. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const index = await collection.createFulltextIndex("description"); -const result = await collection.index(index.id); -assert.equal(result.id, index.id); -// result contains the properties of the index - -// -- or -- - -const result = await collection.index(index.id.split("/")[1]); -assert.equal(result.id, index.id); - -// -- or -- - -const result = await collection.index(index.name); -assert.equal(result.id, index.id); -assert.equal(result.name, index.name); -// result contains the properties of the index -``` - -## collection.indexes - -`async collection.indexes(): Array` - -Fetches a list of all indexes on this collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -await collection.createFulltextIndex("description"); -const indexes = await collection.indexes(); -assert.equal(indexes.length, 1); -// indexes contains information about the index -``` - -## collection.dropIndex - -`async collection.dropIndex(indexHandle): Object` - -Deletes the index with the given _indexHandle_ from the collection. - -**Arguments** - -- **indexHandle**: `string` - - The handle of the index to delete. This can either be a fully-qualified - identifier or the collection-specific key of the index. If the value is an - object, its _id_ property will be used instead. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); -const index = await collection.createFulltextIndex("description"); -await collection.dropIndex(index.id); -// the index has been removed from the collection - -// -- or -- - -await collection.dropIndex(index.id.split("/")[1]); -// the index has been removed from the collection -``` - -## collection.createCapConstraint - -`async collection.createCapConstraint(size): Object` - -Creates a cap constraint index on the collection. - -{% hint 'warning' %} -This method is not available when targeting ArangoDB 3.0 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). 
-{% endhint %} - -**Arguments** - -- **size**: `Object` - - An object with any of the following properties: - - - **size**: `number` (optional) - - The maximum number of documents in the collection. - - - **byteSize**: `number` (optional) - - The maximum size of active document data in the collection (in bytes). - -If _size_ is a number, it will be interpreted as _size.size_. - -For more information on the properties of the _size_ object see the -[HTTP API for creating cap constraints](https://docs.arangodb.com/2.8/HttpIndexes/Cap.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("some-collection"); - -const index = await collection.createCapConstraint(20); -// the index has been created with the handle `index.id` -assert.equal(index.size, 20); - -// -- or -- - -const index = await collection.createCapConstraint({ size: 20 }); -// the index has been created with the handle `index.id` -assert.equal(index.size, 20); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/README.md b/Documentation/Books/Drivers/JS/Reference/Collection/README.md deleted file mode 100644 index fd0fd49c7e48..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/README.md +++ /dev/null @@ -1,130 +0,0 @@ - -# Collection API - -These functions implement the -[HTTP API for manipulating collections](../../../..//HTTP/Collection/index.html). - -The _Collection API_ is implemented by all _Collection_ instances, regardless of -their specific type. I.e. it represents a shared subset between instances of -[_DocumentCollection_](DocumentCollection.md), -[_EdgeCollection_](EdgeCollection.md), -[_GraphVertexCollection_](../Graph/VertexCollection.md) and -[_GraphEdgeCollection_](../Graph/EdgeCollection.md). - -## Getting information about the collection - -See the -[HTTP API documentation](../../../..//HTTP/Collection/Getting.html) -for details. - -## collection.exists - -`async collection.exists(): boolean` - -Checks whether the collection exists. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const result = await collection.exists(); -// result indicates whether the collection exists -``` - -### collection.get - -`async collection.get(): Object` - -Retrieves general information about the collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.get(); -// data contains general information about the collection -``` - -### collection.properties - -`async collection.properties(): Object` - -Retrieves the collection's properties. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.properties(); -// data contains the collection's properties -``` - -### collection.count - -`async collection.count(): Object` - -Retrieves information about the number of documents in a collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.count(); -// data contains the collection's count -``` - -### collection.figures - -`async collection.figures(): Object` - -Retrieves statistics for a collection. 
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.figures(); -// data contains the collection's figures -``` - -### collection.revision - -`async collection.revision(): Object` - -Retrieves the collection revision ID. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.revision(); -// data contains the collection's revision -``` - -### collection.checksum - -`async collection.checksum([opts]): Object` - -Retrieves the collection checksum. - -**Arguments** - -- **opts**: `Object` (optional) - - For information on the possible options see the - [HTTP API for getting collection information](../../../..//HTTP/Collection/Getting.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.checksum(); -// data contains the collection's checksum -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md b/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md deleted file mode 100644 index d86d7fa4e35f..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md +++ /dev/null @@ -1,209 +0,0 @@ - -# Simple queries - -These functions implement the -[HTTP API for simple queries](../../../..//HTTP/SimpleQuery/index.html). - -## collection.all - -`async collection.all([opts]): Cursor` - -Performs a query to fetch all documents in the collection. Returns a -[new _Cursor_ instance](../Cursor.md) for the query results. - -**Arguments** - -- **opts**: `Object` (optional) - - For information on the possible options see the - [HTTP API for returning all documents](../../../..//HTTP/SimpleQuery/index.html#return-all-documents). - -## collection.any - -`async collection.any(): Object` - -Fetches a document from the collection at random. - -## collection.first - -`async collection.first([opts]): Array` - -Performs a query to fetch the first documents in the collection. Returns an -array of the matching documents. - -{% hint 'warning' %} -This method is not available when targeting ArangoDB 3.0 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -**Arguments** - -- **opts**: `Object` (optional) - - For information on the possible options see the - [HTTP API for returning the first document of a collection](https://docs.arangodb.com/2.8/HttpSimpleQuery/#first-document-of-a-collection). - - If _opts_ is a number it is treated as _opts.count_. - -## collection.last - -`async collection.last([opts]): Array` - -Performs a query to fetch the last documents in the collection. Returns an array -of the matching documents. - -{% hint 'warning' %} -This method is not available when targeting ArangoDB 3.0 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -**Arguments** - -- **opts**: `Object` (optional) - - For information on the possible options see the - [HTTP API for returning the last document of a collection](https://docs.arangodb.com/2.8/HttpSimpleQuery/#last-document-of-a-collection). - - If _opts_ is a number it is treated as _opts.count_. - -## collection.byExample - -`async collection.byExample(example, [opts]): Cursor` - -Performs a query to fetch all documents in the collection matching the given -_example_. Returns a [new _Cursor_ instance](../Cursor.md) for the query results. 
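For example, assuming a `users` collection whose documents carry a `role` attribute, a minimal sketch:

```js
const db = new Database();
const collection = db.collection("users");
const cursor = await collection.byExample({ role: "admin" });
const admins = await cursor.all();
// admins is an array of all documents whose role attribute equals "admin"
```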
- -**Arguments** - -- **example**: _Object_ - - An object representing an example for documents to be matched against. - -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for fetching documents by example](../../../..//HTTP/SimpleQuery/index.html#find-documents-matching-an-example). - -## collection.firstExample - -`async collection.firstExample(example): Object` - -Fetches the first document in the collection matching the given _example_. - -**Arguments** - -- **example**: _Object_ - - An object representing an example for documents to be matched against. - -## collection.removeByExample - -`async collection.removeByExample(example, [opts]): Object` - -Removes all documents in the collection matching the given _example_. - -**Arguments** - -- **example**: _Object_ - - An object representing an example for documents to be matched against. - -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for removing documents by example](../../../..//HTTP/SimpleQuery/index.html#remove-documents-by-example). - -## collection.replaceByExample - -`async collection.replaceByExample(example, newValue, [opts]): Object` - -Replaces all documents in the collection matching the given _example_ with the -given _newValue_. - -**Arguments** - -- **example**: _Object_ - - An object representing an example for documents to be matched against. - -- **newValue**: _Object_ - - The new value to replace matching documents with. - -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for replacing documents by example](../../../..//HTTP/SimpleQuery/index.html#replace-documents-by-example). - -## collection.updateByExample - -`async collection.updateByExample(example, newValue, [opts]): Object` - -Updates (patches) all documents in the collection matching the given _example_ -with the given _newValue_. - -**Arguments** - -- **example**: _Object_ - - An object representing an example for documents to be matched against. - -- **newValue**: _Object_ - - The new value to update matching documents with. - -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for updating documents by example](../../../..//HTTP/SimpleQuery/index.html#update-documents-by-example). - -## collection.lookupByKeys - -`async collection.lookupByKeys(keys): Array` - -Fetches the documents with the given _keys_ from the collection. Returns an -array of the matching documents. - -**Arguments** - -- **keys**: _Array_ - - An array of document keys to look up. - -## collection.removeByKeys - -`async collection.removeByKeys(keys, [opts]): Object` - -Deletes the documents with the given _keys_ from the collection. - -**Arguments** - -- **keys**: _Array_ - - An array of document keys to delete. - -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for removing documents by keys](../../../..//HTTP/SimpleQuery/index.html#remove-documents-by-their-keys). - -## collection.fulltext - -`async collection.fulltext(fieldName, query, [opts]): Cursor` - -Performs a fulltext query in the given _fieldName_ on the collection. - -**Arguments** - -- **fieldName**: _String_ - - Name of the field to search on documents in the collection. - -- **query**: _String_ - - Fulltext query string to search for. 
- -- **opts**: _Object_ (optional) - - For information on the possible options see the - [HTTP API for fulltext queries](../../../..//HTTP/Indexes/Fulltext.html). diff --git a/Documentation/Books/Drivers/JS/Reference/Cursor.md b/Documentation/Books/Drivers/JS/Reference/Cursor.md deleted file mode 100644 index 4b22f5f89653..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Cursor.md +++ /dev/null @@ -1,300 +0,0 @@ - -# Cursor API - -_Cursor_ instances provide an abstraction over the HTTP API's limitations. -Unless a method explicitly exhausts the cursor, the driver will only fetch as -many batches from the server as necessary. Like the server-side cursors, -_Cursor_ instances are incrementally depleted as they are read from. - -```js -const db = new Database(); -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -// query result list: [1, 2, 3, 4, 5] -const value = await cursor.next(); -assert.equal(value, 1); -// remaining result list: [2, 3, 4, 5] -``` - -## cursor.count - -`cursor.count: number` - -The total number of documents in the query result. This is only available if the -`count` option was used. - -## cursor.all - -`async cursor.all(): Array` - -Exhausts the cursor, then returns an array containing all values in the cursor's -remaining result list. - -**Examples** - -```js -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.all() -// result is an array containing the entire query result -assert.deepEqual(result, [1, 2, 3, 4, 5]); -assert.equal(cursor.hasNext(), false); -``` - -## cursor.next - -`async cursor.next(): Object` - -Advances the cursor and returns the next value in the cursor's remaining result -list. If the cursor has already been exhausted, returns `undefined` instead. - -**Examples** - -```js -// query result list: [1, 2, 3, 4, 5] -const val = await cursor.next(); -assert.equal(val, 1); -// remaining result list: [2, 3, 4, 5] - -const val2 = await cursor.next(); -assert.equal(val2, 2); -// remaining result list: [3, 4, 5] -``` - -## cursor.hasNext - -`cursor.hasNext(): boolean` - -Returns `true` if the cursor has more values or `false` if the cursor has been -exhausted. - -**Examples** - -```js -await cursor.all(); // exhausts the cursor -assert.equal(cursor.hasNext(), false); -``` - -## cursor.each - -`async cursor.each(fn): any` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ explicitly returns -`false`. - -Returns the last return value of _fn_. - -Equivalent to _Array.prototype.forEach_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until it explicitly returns `false` or the cursor is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. 
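A minimal sketch of stopping the iteration early by returning `false` (the query is assumed):

```js
const cursor = await db.query('FOR x IN 1..5 RETURN x');
await cursor.each(value => {
  // explicitly returning false stops the iteration early
  if (value > 2) return false;
  // ... process value ...
});
// values after the stopping point remain in the cursor
```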
- -**Examples** - -```js -const results = []; -function doStuff(value) { - const VALUE = value.toUpperCase(); - results.push(VALUE); - return VALUE; -} - -const cursor = await db.query('FOR x IN ["a", "b", "c"] RETURN x') -const last = await cursor.each(doStuff); -assert.deepEqual(results, ['A', 'B', 'C']); -assert.equal(cursor.hasNext(), false); -assert.equal(last, 'C'); -``` - -## cursor.every - -`async cursor.every(fn): boolean` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ returns a value that -evaluates to `false`. - -Returns `false` if _fn_ returned a value that evaluates to `false`, or `true` -otherwise. - -Equivalent to _Array.prototype.every_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until it returns a value that evaluates to `false` or the cursor - is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -```js -const even = value => value % 2 === 0; - -const cursor = await db.query('FOR x IN 2..5 RETURN x'); -const result = await cursor.every(even); -assert.equal(result, false); // 3 is not even -assert.equal(cursor.hasNext(), true); - -const value = await cursor.next(); -assert.equal(value, 4); // next value after 3 -``` - -## cursor.some - -`async cursor.some(fn): boolean` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ returns a value that -evaluates to `true`. - -Returns `true` if _fn_ returned a value that evaluates to `true`, or `false` -otherwise. - -Equivalent to _Array.prototype.some_ (except async). - -**Examples** - -```js -const even = value => value % 2 === 0; - -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.some(even); -assert.equal(result, true); // 2 is even -assert.equal(cursor.hasNext(), true); - -const value = await cursor.next(); -assert.equal(value, 3); // next value after 2 -``` - -## cursor.map - -`cursor.map(fn): Array` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted. - -Returns an array of the return values of _fn_. - -Equivalent to _Array.prototype.map_ (except async). - -**Note**: This creates an array of all return values. It is probably a bad idea -to do this for very large query result sets. - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until the cursor is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. 
- -**Examples** - -```js -const square = value => value * value; -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.map(square); -assert.equal(result.length, 5); -assert.deepEqual(result, [1, 4, 9, 16, 25]); -assert.equal(cursor.hasNext(), false); -``` - -## cursor.reduce - -`cursor.reduce(fn, [accu]): any` - -Exhausts the cursor by reducing the values in the cursor's remaining result list -with the given function _fn_. If _accu_ is not provided, the first value in the -cursor's remaining result list will be used instead (the function will not be -invoked for that value). - -Equivalent to _Array.prototype.reduce_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until the cursor is exhausted. - - The function receives the following arguments: - - * **accu**: `any` - - The return value of the previous call to _fn_. If this is the first call, - _accu_ will be set to the _accu_ value passed to _reduce_ or the first value - in the cursor's remaining result list. - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -**Examples** - -```js -const add = (a, b) => a + b; -const baseline = 1000; - -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.reduce(add, baseline) -assert.equal(result, baseline + 1 + 2 + 3 + 4 + 5); -assert.equal(cursor.hasNext(), false); - -// -- or -- - -const result = await cursor.reduce(add); -assert.equal(result, 1 + 2 + 3 + 4 + 5); -assert.equal(cursor.hasNext(), false); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md b/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md deleted file mode 100644 index 6a510310ac57..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md +++ /dev/null @@ -1,83 +0,0 @@ - -# Managing AQL user functions - -These functions implement the -[HTTP API for managing AQL user functions](../../../..//HTTP/AqlUserFunctions/index.html). - -## database.listFunctions - -`async database.listFunctions(): Array` - -Fetches a list of all AQL user functions registered with the database. - -**Examples** - -```js -const db = new Database(); -const functions = db.listFunctions(); -// functions is a list of function descriptions -``` - -## database.createFunction - -`async database.createFunction(name, code): Object` - -Creates an AQL user function with the given _name_ and _code_ if it does not -already exist or replaces it if a function with the same name already existed. - -**Arguments** - -* **name**: `string` - - A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"`. - -* **code**: `string` - - A string evaluating to a JavaScript function (not a JavaScript function - object). 
- -**Examples** - -```js -const db = new Database(); -await db.createFunction( - 'ACME::ACCOUNTING::CALCULATE_VAT', - String(function (price) { - return price * 0.19; - }) -); -// Use the new function in an AQL query with template handler: -const cursor = await db.query(aql` - FOR product IN products - RETURN MERGE( - {vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)}, - product - ) -`); -// cursor is a cursor for the query result -``` - -## database.dropFunction - -`async database.dropFunction(name, [group]): Object` - -Deletes the AQL user function with the given name from the database. - -**Arguments** - -* **name**: `string` - - The name of the user function to drop. - -* **group**: `boolean` (Default: `false`) - - If set to `true`, all functions with a name starting with _name_ will be - deleted; otherwise only the function with the exact name will be deleted. - -**Examples** - -```js -const db = new Database(); -await db.dropFunction('ACME::ACCOUNTING::CALCULATE_VAT'); -// the function no longer exists -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md deleted file mode 100644 index 603695848164..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md +++ /dev/null @@ -1,103 +0,0 @@ - -# Accessing collections - -These functions implement the -[HTTP API for accessing collections](../../../..//HTTP/Collection/Getting.html). - -## database.collection - -`database.collection(collectionName): DocumentCollection` - -Returns a _DocumentCollection_ instance for the given collection name. - -**Arguments** - -- **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("potatoes"); -``` - -## database.edgeCollection - -`database.edgeCollection(collectionName): EdgeCollection` - -Returns an _EdgeCollection_ instance for the given collection name. - -**Arguments** - -- **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("potatoes"); -``` - -## database.listCollections - -`async database.listCollections([excludeSystem]): Array` - -Fetches all collections from the database and returns an array of collection -descriptions. - -**Arguments** - -- **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. - -**Examples** - -```js -const db = new Database(); - -const collections = await db.listCollections(); -// collections is an array of collection descriptions -// not including system collections - -// -- or -- - -const collections = await db.listCollections(false); -// collections is an array of collection descriptions -// including system collections -``` - -## database.collections - -`async database.collections([excludeSystem]): Array` - -Fetches all collections from the database and returns an array of -_DocumentCollection_ and _EdgeCollection_ instances for the collections. - -**Arguments** - -- **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. 
- -**Examples** - -```js -const db = new Database(); - -const collections = await db.collections() -// collections is an array of DocumentCollection -// and EdgeCollection instances -// not including system collections - -// -- or -- - -const collections = await db.collections(false) -// collections is an array of DocumentCollection -// and EdgeCollection instances -// including system collections -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md b/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md deleted file mode 100644 index 7ea7a0818016..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md +++ /dev/null @@ -1,137 +0,0 @@ - -# Manipulating databases - -These functions implement the -[HTTP API for manipulating databases](../../../..//HTTP/Database/index.html). - -## database.createDatabase - -`async database.createDatabase(databaseName, [users]): Object` - -Creates a new database with the given _databaseName_. - -**Arguments** - -- **databaseName**: `string` - - Name of the database to create. - -- **users**: `Array` (optional) - - If specified, the array must contain objects with the following properties: - - - **username**: `string` - - The username of the user to create for the database. - - - **passwd**: `string` (Default: empty) - - The password of the user. - - - **active**: `boolean` (Default: `true`) - - Whether the user is active. - - - **extra**: `Object` (optional) - - An object containing additional user data. - -**Examples** - -```js -const db = new Database(); -const info = await db.createDatabase("mydb", [{ username: "root" }]); -// the database has been created -``` - -## database.exists - -`async database.exists(): boolean` - -Checks whether the database exists. - -**Examples** - -```js -const db = new Database(); -const result = await db.exists(); -// result indicates whether the database exists -``` - -## database.get - -`async database.get(): Object` - -Fetches the database description for the active database from the server. - -**Examples** - -```js -const db = new Database(); -const info = await db.get(); -// the database exists -``` - -## database.listDatabases - -`async database.listDatabases(): Array` - -Fetches all databases from the server and returns an array of their names. - -**Examples** - -```js -const db = new Database(); -const names = await db.listDatabases(); -// databases is an array of database names -``` - -## database.listUserDatabases - -`async database.listUserDatabases(): Array` - -Fetches all databases accessible to the active user from the server and returns -an array of their names. - -**Examples** - -```js -const db = new Database(); -const names = await db.listUserDatabases(); -// databases is an array of database names -``` - -## database.dropDatabase - -`async database.dropDatabase(databaseName): Object` - -Deletes the database with the given _databaseName_ from the server. - -```js -const db = new Database(); -await db.dropDatabase("mydb"); -// database "mydb" no longer exists -``` - -## database.truncate - -`async database.truncate([excludeSystem]): Object` - -Deletes **all documents in all collections** in the active database. - -**Arguments** - -- **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. Note that this option will be - ignored because truncating system collections is not supported anymore for - some system collections. 
- -**Examples** - -```js -const db = new Database(); - -await db.truncate(); -// all non-system collections in this database are now empty -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md b/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md deleted file mode 100644 index 8d11b9cb9c8f..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md +++ /dev/null @@ -1,776 +0,0 @@ - -# Managing Foxx services - -## database.listServices - -`async database.listServices([excludeSystem]): Array` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Fetches a list of all installed service. - -**Arguments** - -- **excludeSystem**: `boolean` (Default: `true`) - - Whether system services should be excluded. - -**Examples** - -```js -const services = await db.listServices(); - -// -- or -- - -const services = await db.listServices(false); -``` - -## database.installService - -`async database.installService(mount, source, [options]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Installs a new service. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **source**: `Buffer | Readable | File | string` - - The service bundle to install. - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **configuration**: `Object` (optional) - - An object mapping configuration option names to values. - - - **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - - **development**: `boolean` (Default: `false`) - - Whether the service should be installed in development mode. - - - **legacy**: `boolean` (Default: `false`) - - Whether the service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - - **setup**: `boolean` (Default: `true`) - - Whether the setup script should be executed. - -**Examples** - -```js -const source = fs.createReadStream("./my-foxx-service.zip"); -const info = await db.installService("/hello", source); - -// -- or -- - -const source = fs.readFileSync("./my-foxx-service.zip"); -const info = await db.installService("/hello", source); - -// -- or -- - -const element = document.getElementById("my-file-input"); -const source = element.files[0]; -const info = await db.installService("/hello", source); -``` - -## database.replaceService - -`async database.replaceService(mount, source, [options]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Replaces an existing service with a new service by completely removing the old -service and installing a new service at the same mount point. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **source**: `Buffer | Readable | File | string` - - The service bundle to replace the existing service with. - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **configuration**: `Object` (optional) - - An object mapping configuration option names to values. 
- - This configuration will replace the existing configuration. - - - **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - These dependencies will replace the existing dependencies. - - - **development**: `boolean` (Default: `false`) - - Whether the new service should be installed in development mode. - - - **legacy**: `boolean` (Default: `false`) - - Whether the new service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - - **teardown**: `boolean` (Default: `true`) - - Whether the teardown script of the old service should be executed. - - - **setup**: `boolean` (Default: `true`) - - Whether the setup script of the new service should be executed. - -**Examples** - -```js -const source = fs.createReadStream("./my-foxx-service.zip"); -const info = await db.replaceService("/hello", source); - -// -- or -- - -const source = fs.readFileSync("./my-foxx-service.zip"); -const info = await db.replaceService("/hello", source); - -// -- or -- - -const element = document.getElementById("my-file-input"); -const source = element.files[0]; -const info = await db.replaceService("/hello", source); -``` - -## database.upgradeService - -`async database.upgradeService(mount, source, [options]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Replaces an existing service with a new service while retaining the old -service's configuration and dependencies. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **source**: `Buffer | Readable | File | string` - - The service bundle to replace the existing service with. - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **configuration**: `Object` (optional) - - An object mapping configuration option names to values. - - This configuration will be merged into the existing configuration. - - - **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - These dependencies will be merged into the existing dependencies. - - - **development**: `boolean` (Default: `false`) - - Whether the new service should be installed in development mode. - - - **legacy**: `boolean` (Default: `false`) - - Whether the new service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - - **teardown**: `boolean` (Default: `false`) - - Whether the teardown script of the old service should be executed. - - - **setup**: `boolean` (Default: `true`) - - Whether the setup script of the new service should be executed. - -**Examples** - -```js -const source = fs.createReadStream("./my-foxx-service.zip"); -const info = await db.upgradeService("/hello", source); - -// -- or -- - -const source = fs.readFileSync("./my-foxx-service.zip"); -const info = await db.upgradeService("/hello", source); - -// -- or -- - -const element = document.getElementById("my-file-input"); -const source = element.files[0]; -const info = await db.upgradeService("/hello", source); -``` - -## database.uninstallService - -`async database.uninstallService(mount, [options]): void` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). 
-{% endhint %} - -Completely removes a service from the database. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **teardown**: `boolean` (Default: `true`) - - Whether the teardown script should be executed. - -**Examples** - -```js -await db.uninstallService("/my-service"); -// service was uninstalled -``` - -## database.getService - -`async database.getService(mount): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves information about a mounted service. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.getService("/my-service"); -// info contains detailed information about the service -``` - -## database.getServiceConfiguration - -`async database.getServiceConfiguration(mount, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves an object with information about the service's configuration options -and their current values. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values. - -**Examples** - -```js -const config = await db.getServiceConfiguration("/my-service"); -// config contains information about the service's configuration -``` - -## database.replaceServiceConfiguration - -`async database.replaceServiceConfiguration(mount, configuration, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Replaces the configuration of the given service. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **configuration**: `Object` - - An object mapping configuration option names to values. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const config = { currency: "USD", locale: "en-US" }; -const info = await db.replaceServiceConfiguration("/my-service", config); -// info.values contains information about the service's configuration -// info.warnings contains any validation errors for the configuration -``` - -## database.updateServiceConfiguration - -`async database.updateServiceConfiguration(mount, configuration, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Updates the configuration of the given service my merging the new values into -the existing ones. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **configuration**: `Object` - - An object mapping configuration option names to values. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). 
- - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const config = { locale: "en-US" }; -const info = await db.updateServiceConfiguration("/my-service", config); -// info.values contains information about the service's configuration -// info.warnings contains any validation errors for the configuration -``` - -## database.getServiceDependencies - -`async database.getServiceDependencies(mount, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves an object with information about the service's dependencies and their -current mount points. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - -**Examples** - -```js -const deps = await db.getServiceDependencies("/my-service"); -// deps contains information about the service's dependencies -``` - -## database.replaceServiceDependencies - -`async database.replaceServiceDependencies(mount, dependencies, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Replaces the dependencies for the given service. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **dependencies**: `Object` - - An object mapping dependency aliases to mount points. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const deps = { mailer: "/mailer-api", auth: "/remote-auth" }; -const info = await db.replaceServiceDependencies("/my-service", deps); -// info.values contains information about the service's dependencies -// info.warnings contains any validation errors for the dependencies -``` - -## database.updateServiceDependencies - -`async database.updateServiceDependencies(mount, dependencies, [minimal]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Updates the dependencies for the given service by merging the new values into -the existing ones. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **dependencies**: `Object` - - An object mapping dependency aliases to mount points. - -- **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. 
- -**Examples** - -```js -const deps = { mailer: "/mailer-api" }; -const info = await db.updateServiceDependencies("/my-service", deps); -// info.values contains information about the service's dependencies -// info.warnings contains any validation errors for the dependencies -``` - -## database.enableServiceDevelopmentMode - -`async database.enableServiceDevelopmentMode(mount): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Enables development mode for the given service. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.enableServiceDevelopmentMode("/my-service"); -// the service is now in development mode -// info contains detailed information about the service -``` - -## database.disableServiceDevelopmentMode - -`async database.disableServiceDevelopmentMode(mount): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Disabled development mode for the given service and commits the service state to -the database. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.disableServiceDevelopmentMode("/my-service"); -// the service is now in production mode -// info contains detailed information about the service -``` - -## database.listServiceScripts - -`async database.listServiceScripts(mount): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves a list of the service's scripts. - -Returns an object mapping each name to a more readable representation. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const scripts = await db.listServiceScripts("/my-service"); -// scripts is an object listing the service scripts -``` - -## database.runServiceScript - -`async database.runServiceScript(mount, name, [scriptArg]): any` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Runs a service script and returns the result. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -- **name**: `string` - - Name of the script to execute. - -- **scriptArg**: `any` - - Value that will be passed as an argument to the script. - -**Examples** - -```js -const result = await db.runServiceScript("/my-service", "setup"); -// result contains the script's exports (if any) -``` - -## database.runServiceTests - -`async database.runServiceTests(mount, [reporter]): any` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Runs the tests of a given service and returns a formatted report. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **reporter**: `string` (Default: `default`) - - The reporter to use to process the test results. 
- - As of ArangoDB 3.2 the following reporters are supported: - - - **stream**: an array of event objects - - **suite**: nested suite objects with test results - - **xunit**: JSONML representation of an XUnit report - - **tap**: an array of TAP event strings - - **default**: an array of test results - - - **idiomatic**: `boolean` (Default: `false`) - - Whether the results should be converted to the apropriate `string` - representation: - - - **xunit** reports will be formatted as XML documents - - **tap** reports will be formatted as TAP streams - - **stream** reports will be formatted as JSON-LD streams - -**Examples** - -```js -const opts = { reporter: "xunit", idiomatic: true }; -const result = await db.runServiceTests("/my-service", opts); -// result contains the XUnit report as a string -``` - -## database.downloadService - -`async database.downloadService(mount): Buffer | Blob` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves a zip bundle containing the service files. - -Returns a `Buffer` in Node or `Blob` in the browser version. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const bundle = await db.downloadService("/my-service"); -// bundle is a Buffer/Blob of the service bundle -``` - -## database.getServiceReadme - -`async database.getServiceReadme(mount): string?` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves the text content of the service's `README` or `README.md` file. - -Returns `undefined` if no such file could be found. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const readme = await db.getServiceReadme("/my-service"); -// readme is a string containing the service README's -// text content, or undefined if no README exists -``` - -## database.getServiceDocumentation - -`async database.getServiceDocumentation(mount): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves a Swagger API description object for the service installed at the -given mount point. - -**Arguments** - -- **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const spec = await db.getServiceDocumentation("/my-service"); -// spec is a Swagger API description of the service -``` - -## database.commitLocalServiceState - -`async database.commitLocalServiceState([replace]): void` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.2 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Writes all locally available services to the database and updates any service -bundles missing in the database. - -**Arguments** - -- **replace**: `boolean` (Default: `false`) - - Also commit outdated services. - - This can be used to solve some consistency problems when service bundles are - missing in the database or were deleted manually. 
- -**Examples** - -```js -await db.commitLocalServiceState(); -// all services available on the coordinator have been written to the db - -// -- or -- - -await db.commitLocalServiceState(true); -// all service conflicts have been resolved in favor of this coordinator -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md deleted file mode 100644 index 2aef447c8760..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md +++ /dev/null @@ -1,40 +0,0 @@ - -# Accessing graphs - -These functions implement the -[HTTP API for accessing general graphs](../../../..//HTTP/Gharial/index.html). - -## database.graph - -`database.graph(graphName): Graph` - -Returns a _Graph_ instance representing the graph with the given graph name. - -## database.listGraphs - -`async database.listGraphs(): Array` - -Fetches all graphs from the database and returns an array of graph descriptions. - -**Examples** - -```js -const db = new Database(); -const graphs = await db.listGraphs(); -// graphs is an array of graph descriptions -``` - -## database.graphs - -`async database.graphs(): Array` - -Fetches all graphs from the database and returns an array of _Graph_ instances -for the graphs. - -**Examples** - -```js -const db = new Database(); -const graphs = await db.graphs(); -// graphs is an array of Graph instances -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md b/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md deleted file mode 100644 index d3ec93a0b6f6..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md +++ /dev/null @@ -1,38 +0,0 @@ - -# Arbitrary HTTP routes - -## database.route - -`database.route([path,] [headers]): Route` - -Returns a new _Route_ instance for the given path (relative to the database) -that can be used to perform arbitrary HTTP requests. - -**Arguments** - -* **path**: `string` (optional) - - The database-relative URL of the route. - -* **headers**: `Object` (optional) - - Default headers that should be sent with each request to the route. - -If _path_ is missing, the route will refer to the base URL of the database. - -For more information on _Route_ instances see the -[_Route API_ below](../Route.md). - -**Examples** - -```js -const db = new Database(); -const myFoxxService = db.route('my-foxx-service'); -const response = await myFoxxService.post('users', { - username: 'admin', - password: 'hunter2' -}); -// response.body is the result of -// POST /_db/_system/my-foxx-service/users -// with JSON request body '{"username": "admin", "password": "hunter2"}' -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Queries.md b/Documentation/Books/Drivers/JS/Reference/Database/Queries.md deleted file mode 100644 index 72aac33663a0..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/Queries.md +++ /dev/null @@ -1,249 +0,0 @@ - -# Queries - -These functions implements the -[HTTP API for single round-trip AQL queries](../../../..//HTTP/AqlQueryCursor/QueryResults.html) -as well as the -[HTTP API for managing queries](../../../..//HTTP/AqlQuery/index.html). - -For collection-specific queries see [Simple Queries](../Collection/SimpleQueries.md). - -## database.query - -`async database.query(query, [bindVars,] [opts]): Cursor` - -Performs a database query using the given _query_ and _bindVars_, then returns a -[new _Cursor_ instance](../Cursor.md) for the result list. 
- -**Arguments** - -- **query**: `string | AqlQuery | AqlLiteral` - - An AQL query as a string or - [AQL query object](../Aql.md#aql) or - [AQL literal](../Aql.md#aqlliteral). - If the query is an AQL query object, the second argument is treated as the - _opts_ argument instead of _bindVars_. - -- **bindVars**: `Object` (optional) - - An object defining the variables to bind the query to. - -- **opts**: `Object` (optional) - - Additional parameter object that will be passed to the query API. - Possible keys are _count_ and _options_ (explained below) - -If _opts.count_ is set to `true`, the cursor will have a _count_ property set to -the query result count. - -Possible key options in _opts.options_ include: _failOnWarning_, _cache_, -profile or _skipInaccessibleCollections_. -For a complete list of query settings please reference the -[setting options](../../../..//AQL/Invocation/WithArangosh.html#setting-options). - -Additionally if _opts.allowDirtyRead_ is set to `true`, the request will -explicitly permit ArangoDB to return a potentially dirty or stale result and -arangojs will load balance the request without distinguishing between leaders -and followers. Note that dirty reads are only supported for read-only queries -(e.g. not using `INSERT`, `UPDATE`, `REPLACE` or `REMOVE` expressions). - -{% hint 'info' %} -Dirty reads are only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../../GettingStarted/README.md#compatibility). -{% endhint %} - -Additionally _opts.timeout_ can be set to a non-negative number to force the -request to be cancelled after that amount of milliseconds. Note that this will -simply close the connection and not result in the actual query being cancelled -in ArangoDB, the query will still be executed to completion and continue to -consume resources in the database or cluster. - -If _query_ is an object with _query_ and _bindVars_ properties, those will be -used as the values of the respective arguments instead. - -**Examples** - -```js -const db = new Database(); -const active = true; - -// Using the aql template tag -const cursor = await db.query(aql` - FOR u IN _users - FILTER u.authData.active == ${active} - RETURN u.user -`); -// cursor is a cursor for the query result - -// -- or -- - -// Old-school JS with explicit bindVars: -db.query("FOR u IN _users FILTER u.authData.active == @active RETURN u.user", { - active: true -}).then(function(cursor) { - // cursor is a cursor for the query result -}); -``` - -## aql - -`aql(strings, ...args): Object` - -Template string handler (aka template tag) for AQL queries. Converts a template -string to an object that can be passed to `database.query` by converting -arguments to bind variables. - -**Note**: If you want to pass a collection name as a bind variable, you need to -pass a _Collection_ instance (e.g. what you get by passing the collection name -to `db.collection`) instead. If you see the error `"array expected as operand to FOR loop"`, -you're likely passing a collection name instead of a collection instance. 
- -**Examples** - -```js -const userCollection = db.collection("_users"); -const role = "admin"; - -const query = aql` - FOR user IN ${userCollection} - FILTER user.role == ${role} - RETURN user -`; - -// -- is equivalent to -- -const query = { - query: "FOR user IN @@value0 FILTER user.role == @value1 RETURN user", - bindVars: { "@value0": userCollection.name, value1: role } -}; -``` - -Note how the aql template tag automatically handles collection references -(`@@value0` instead of `@value0`) for us so you don't have to worry about -counting at-symbols. - -Because the aql template tag creates actual bindVars instead of inlining values -directly, it also avoids injection attacks via malicious parameters: - -```js -// malicious user input -const email = '" || (FOR x IN secrets REMOVE x IN secrets) || "'; - -// DON'T do this! -const query = ` - FOR user IN users - FILTER user.email == "${email}" - RETURN user -`; -// FILTER user.email == "" || (FOR x IN secrets REMOVE x IN secrets) || "" - -// instead do this! -const query = aql` - FOR user IN users - FILTER user.email == ${email} - RETURN user -`; -// FILTER user.email == @value0 -``` - -## database.explain - -`async database.explain(query, [bindVars,] [opts]): ExplainResult` - -Explains a database query using the given _query_ and _bindVars_ and -returns one or more plans. - -**Arguments** - -- **query**: `string | AqlQuery | AqlLiteral` - - An AQL query as a string or - [AQL query object](../Aql.md#aql) or - [AQL literal](../Aql.md#aqlliteral). - If the query is an AQL query object, the second argument is treated as the - _opts_ argument instead of _bindVars_. - -- **bindVars**: `Object` (optional) - - An object defining the variables to bind the query to. - -- **opts**: `Object` (optional) - - - **optimizer**: `Object` (optional) - - An object with a single property **rules**, a string array of optimizer - rules to be used for the query. - - - **maxNumberOfPlans**: `number` (optional) - - Maximum number of plans that the optimizer is allowed to generate. - Setting this to a low value limits the amount of work the optimizer does. - - - **allPlans**: `boolean` (Default: `false`) - - If set to true, all possible execution plans will be returned - as the _plans_ property. Otherwise only the optimal execution plan will - be returned as the _plan_ property. - -## database.parse - -`async database.parse(query): ParseResult` - -Parses the given query and returns the result. - -**Arguments** - -- **query**: `string | AqlQuery | AqlLiteral` - - An AQL query as a string or - [AQL query object](../Aql.md#aql) or - [AQL literal](../Aql.md#aqlliteral). - If the query is an AQL query object, its bindVars (if any) will be ignored. - -## database.queryTracking - -`async database.queryTracking(): QueryTrackingProperties` - -Fetches the query tracking properties. - -## database.setQueryTracking - -`async database.setQueryTracking(props): void` - -Modifies the query tracking properties. - -**Arguments** - -- **props**: `Partial` - - Query tracking properties with new values to set. - -## database.listRunningQueries - -`async database.listRunningQueries(): Array` - -Fetches a list of information for all currently running queries. - -## database.listSlowQueries - -`async database.listSlowQueries(): Array` - -Fetches a list of information for all recent slow queries. - -## database.clearSlowQueries - -`async database.clearSlowQueries(): void` - -Clears the list of recent slow queries. 
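-
-The query tracking and slow query methods above ship without examples in this
-file. A minimal sketch of how they might be combined (the property names follow
-ArangoDB's query tracking properties; the threshold value is an illustrative
-assumption, not taken from the original examples):
-
-```js
-const db = new Database();
-
-// Track queries that run longer than 5 seconds (illustrative values).
-await db.setQueryTracking({
-  enabled: true,
-  trackSlowQueries: true,
-  slowQueryThreshold: 5
-});
-
-const running = await db.listRunningQueries();
-// running is an array of descriptions of currently running queries
-
-const slow = await db.listSlowQueries();
-// slow is an array of descriptions of recent slow queries
-
-await db.clearSlowQueries();
-// the list of recent slow queries is now empty
-```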
- -## database.killQuery - -`async database.killQuery(queryId): void` - -Kills a running query with the given ID. - -**Arguments** - -- **queryId**: `string` - - The ID of a currently running query. diff --git a/Documentation/Books/Drivers/JS/Reference/Database/README.md b/Documentation/Books/Drivers/JS/Reference/Database/README.md deleted file mode 100644 index a15c090d65c9..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/README.md +++ /dev/null @@ -1,316 +0,0 @@ - -# Database API - -## new Database - -`new Database([config]): Database` - -Creates a new _Database_ instance. - -If _config_ is a string, it will be interpreted as _config.url_. - -**Arguments** - -- **config**: `Object` (optional) - - An object with the following properties: - - - **url**: `string | Array` (Default: `http://localhost:8529`) - - Base URL of the ArangoDB server or list of server URLs. - - When working with a cluster or a single server with leader/follower failover, - [the method `db.acquireHostList`](#databaseacquirehostlist) - can be used to automatically pick up additional coordinators/followers at - any point. - - When running ArangoDB on a unix socket, e.g. `/tmp/arangodb.sock`, the - following URL formats are supported for unix sockets: - - - `unix:///tmp/arangodb.sock` (no SSL) - - `http+unix:///tmp/arangodb.sock` (or `https+unix://` for SSL) - - `http://unix:/tmp/arangodb.sock` (or `https://unix:` for SSL) - - Additionally `ssl` and `tls` are treated as synonymous with `https` and - `tcp` is treated as synonymous with `http`, so the following URLs are - considered identical: - - - `tcp://localhost:8529` and `http://localhost:8529` - - `ssl://localhost:8529` and `https://localhost:8529` - - `tcp+unix:///tmp/arangodb.sock` and `http+unix:///tmp/arangodb.sock` - - `ssl+unix:///tmp/arangodb.sock` and `https+unix:///tmp/arangodb.sock` - - `tcp://unix:/tmp/arangodb.sock` and `http://unix:/tmp/arangodb.sock` - - `ssl://unix:/tmp/arangodb.sock` and `https://unix:/tmp/arangodb.sock` - - If you want to use ArangoDB with authentication, see - _useBasicAuth_ or _useBearerAuth_ methods. - - If you need to support self-signed HTTPS certificates, you may have to add - your certificates to the _agentOptions_, e.g.: - - ```js - ... - agentOptions: { - ca: [ - fs.readFileSync(".ssl/sub.class1.server.ca.pem"), - fs.readFileSync(".ssl/ca.pem") - ] - } - ``` - - Although this is **strongly discouraged**, it's also possible to disable - HTTPS certificate validation entirely, but note this has - **extremely dangerous** security implications: - - ```js - ... - agentOptions: { - rejectUnauthorized: false - } - ``` - - - **isAbsolute**: `boolean` (Default: `false`) - - If this option is explicitly set to `true`, the _url_ will be treated as the - absolute database path and arangojs will not append the database path to it. - - **Note:** This makes it impossible to switch databases with _useDatabase_ - or using _acquireHostList_. This is only intended to be used as an escape - hatch when working with standalone servers exposing a single database API - from behind a reverse proxy, which is not a recommended setup. - - - **arangoVersion**: `number` (Default: `30000`) - - Numeric representation of the ArangoDB version the driver should expect. - The format is defined as `XYYZZ` where `X` is the major version, `Y` is - the zero-filled two-digit minor version and `Z` is the zero-filled two-digit - bugfix version, e.g. `30102` for 3.1.2, `20811` for 2.8.11. 
- - Depending on this value certain methods may become unavailable or change - their behavior to remain compatible with different versions of ArangoDB. - - - **headers**: `Object` (optional) - - An object with additional headers to send with every request. - - Header names should always be lowercase. If an `"authorization"` header is - provided, it will be overridden when using _useBasicAuth_ or _useBearerAuth_. - - - **agent**: `Agent` (optional) - - An http Agent instance to use for connections. - - By default a new - [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) (or - https.Agent) instance will be created using the _agentOptions_. - - This option has no effect when using the browser version of arangojs. - - - **agentOptions**: `Object` (Default: see below) - - An object with options for the agent. This will be ignored if _agent_ is - also provided. - - Default: `{maxSockets: 3, keepAlive: true, keepAliveMsecs: 1000}`. - Browser default: `{maxSockets: 3, keepAlive: false}`; - - The option `maxSockets` can also be used to limit how many requests - arangojs will perform concurrently. The maximum number of requests is - equal to `maxSockets * 2` with `keepAlive: true` or - equal to `maxSockets` with `keepAlive: false`. - - In the browser version of arangojs this option can be used to pass - additional options to the underlying calls of the - [`xhr`](https://www.npmjs.com/package/xhr) module. - - - **loadBalancingStrategy**: `string` (Default: `"NONE"`) - - Determines the behavior when multiple URLs are provided: - - - `NONE`: No load balancing. All requests will be handled by the first - URL in the list until a network error is encountered. On network error, - arangojs will advance to using the next URL in the list. - - - `ONE_RANDOM`: Randomly picks one URL from the list initially, then - behaves like `NONE`. - - - `ROUND_ROBIN`: Every sequential request uses the next URL in the list. - - - **maxRetries**: `number` or `false` (Default: `0`) - - Determines the behavior when a request fails because the underlying - connection to the server could not be opened - (i.e. [`ECONNREFUSED` in Node.js](https://nodejs.org/api/errors.html#errors_common_system_errors)): - - - `false`: the request fails immediately. - - - `0`: the request is retried until a server can be reached but only a - total number of times matching the number of known servers (including - the initial failed request). - - - any other number: the request is retried until a server can be reached - the request has been retried a total of `maxRetries` number of times - (not including the initial failed request). - - When working with a single server without leader/follower failover, the - retries (if any) will be made to the same server. - - This setting currently has no effect when using arangojs in a browser. - - **Note**: Requests bound to a specific server (e.g. fetching query results) - will never be retried automatically and ignore this setting. - -## database.acquireHostList - -`async database.acquireHostList(): this` - -Updates the URL list by requesting a list of all coordinators in the cluster -and adding any endpoints not initially specified in the _url_ configuration. - -For long-running processes communicating with an ArangoDB cluster it is -recommended to run this method repeatedly (e.g. once per hour) to make sure -new coordinators are picked up correctly and can be used for fail-over or -load balancing. 
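-
-A minimal sketch of how the connection options above and `acquireHostList` might
-be combined for a cluster setup (the coordinator URLs and the refresh interval
-are illustrative assumptions, not taken from the original examples):
-
-```js
-const db = new Database({
-  // assumed coordinator endpoints
-  url: ["http://coordinator1:8529", "http://coordinator2:8529"],
-  loadBalancingStrategy: "ROUND_ROBIN"
-});
-
-// Pick up coordinators that join the cluster later, once per hour.
-setInterval(() => db.acquireHostList(), 1000 * 60 * 60);
-```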
- -**Note**: This method can not be used when the arangojs instance was created -with `isAbsolute: true`. - -## database.useDatabase - -`database.useDatabase(databaseName): this` - -Updates the _Database_ instance and its connection string to use the given -_databaseName_, then returns itself. - -**Note**: This method can not be used when the arangojs instance was created -with `isAbsolute: true`. - -**Arguments** - -- **databaseName**: `string` - - The name of the database to use. - -**Examples** - -```js -const db = new Database(); -db.useDatabase("test"); -// The database instance now uses the database "test". -``` - -## database.useBasicAuth - -`database.useBasicAuth([username, [password]]): this` - -Updates the _Database_ instance's `authorization` header to use Basic -authentication with the given _username_ and _password_, then returns itself. - -**Arguments** - -- **username**: `string` (Default: `"root"`) - - The username to authenticate with. - -- **password**: `string` (Default: `""`) - - The password to authenticate with. - -**Examples** - -```js -const db = new Database(); -db.useDatabase("test"); -db.useBasicAuth("admin", "hunter2"); -// The database instance now uses the database "test" -// with the username "admin" and password "hunter2". -``` - -## database.useBearerAuth - -`database.useBearerAuth(token): this` - -Updates the _Database_ instance's `authorization` header to use Bearer -authentication with the given authentication token, then returns itself. - -**Arguments** - -- **token**: `string` - - The token to authenticate with. - -**Examples** - -```js -const db = new Database(); -db.useBearerAuth("keyboardcat"); -// The database instance now uses Bearer authentication. -``` - -## database.login - -`async database.login([username, [password]]): string` - -Validates the given database credentials and exchanges them for an -authentication token, then uses the authentication token for future -requests and returns it. - -**Arguments** - -- **username**: `string` (Default: `"root"`) - - The username to authenticate with. - -- **password**: `string` (Default: `""`) - - The password to authenticate with. - -**Examples** - -```js -const db = new Database(); -db.useDatabase("test"); -await db.login("admin", "hunter2"); -// The database instance now uses the database "test" -// with an authentication token for the "admin" user. -``` - -## database.version - -`async database.version(): Object` - -Fetches the ArangoDB version information for the active database from the server. - -**Examples** - -```js -const db = new Database(); -const version = await db.version(); -// the version object contains the ArangoDB version information. -``` - -## database.close - -`database.close(): void` - -Closes all active connections of the database instance. -Can be used to clean up idling connections during longer periods of inactivity. - -**Note**: This method currently has no effect in the browser version of arangojs. 
- -**Examples** - -```js -const db = new Database(); -const sessions = db.collection("sessions"); -// Clean up expired sessions once per hour -setInterval(async () => { - await db.query(aql` - FOR session IN ${sessions} - FILTER session.expires < DATE_NOW() - REMOVE session IN ${sessions} - `); - // Make sure to close the connections because they're no longer used - db.close(); -}, 1000 * 60 * 60); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md b/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md deleted file mode 100644 index 3fc5ecc19f4c..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md +++ /dev/null @@ -1,103 +0,0 @@ - -# Transactions - -This function implements the -[HTTP API for transactions](../../../..//HTTP/Transaction/index.html). - -## database.transaction - -`async database.transaction(collections, action, [params, [options]]): Object` - -Performs a server-side transaction and returns its return value. - -**Arguments** - -- **collections**: `Object` - - An object with the following properties: - - - **read**: `Array` (optional) - - An array of names (or a single name) of collections that will be read from - during the transaction. - - - **write**: `Array` (optional) - - An array of names (or a single name) of collections that will be written to - or read from during the transaction. - -- **action**: `string` - - A string evaluating to a JavaScript function to be executed on the server. - - {% hint 'warning ' %} - This function will be executed on the server inside ArangoDB and can not use - the arangojs driver or any variables other than those passed as _params_. - For accessing the database from within ArangoDB, see the documentation for the - [`@arangodb` module in ArangoDB](../../../..//Manual/Appendix/JavaScriptModules/ArangoDB.html). - {% endhint %} - -- **params**: `Object` (optional) - - Available as variable `params` when the _action_ function is being executed on - server. Check the example below. - -- **options**: `Object` (optional) - - An object with any of the following properties: - - - **lockTimeout**: `number` (optional) - - Determines how long the database will wait while attempting to gain locks on - collections used by the transaction before timing out. - - - **waitForSync**: `boolean` (optional) - - Determines whether to force the transaction to write all data to disk before returning. - - - **maxTransactionSize**: `number` (optional) - - Determines the transaction size limit in bytes. Honored by the RocksDB storage engine only. - - - **intermediateCommitCount**: `number` (optional) - - Determines the maximum number of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - - - **intermediateCommitSize**: `number` (optional) - - Determine the maximum total size of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - -If _collections_ is an array or string, it will be treated as -_collections.write_. - -Please note that while _action_ should be a string evaluating to a well-formed -JavaScript function, it's not possible to pass in a JavaScript function directly -because the function needs to be evaluated on the server and will be transmitted -in plain text. - -For more information on transactions, see the -[HTTP API documentation for transactions](../../../..//HTTP/Transaction/index.html). 
-
-**Examples**
-
-```js
-const db = new Database();
-const action = String(function(params) {
-  // This code will be executed inside ArangoDB!
-  const { db, aql } = require("@arangodb");
-  return db
-    ._query(
-      aql`
-        FOR user IN _users
-        FILTER user.age > ${params.age}
-        RETURN user.user
-      `
-    )
-    .toArray();
-});
-
-const result = await db.transaction({ read: "_users" }, action, { age: 12 });
-// result contains the return value of the action
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md
deleted file mode 100644
index 1d994262560a..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# Accessing views
-
-These functions implement the
-[HTTP API for accessing views](../../../..//HTTP/Views/Getting.html).
-
-## database.arangoSearchView
-
-`database.arangoSearchView(viewName): ArangoSearchView`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Returns an _ArangoSearchView_ instance for the given view name.
-
-**Arguments**
-
-- **viewName**: `string`
-
-  Name of the arangosearch view.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("potatoes");
-```
-
-## database.listViews
-
-`async database.listViews(): Array`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Fetches all views from the database and returns an array of view
-descriptions.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const views = await db.listViews();
-// views is an array of view descriptions
-```
-
-## database.views
-
-`async database.views([excludeSystem]): Array`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Fetches all views from the database and returns an array of
-_ArangoSearchView_ instances for the views.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const views = await db.views();
-// views is an array of ArangoSearchView instances
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md
deleted file mode 100644
index 7e9953c5a453..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md
+++ /dev/null
@@ -1,310 +0,0 @@
-
-# GraphEdgeCollection API
-
-The _GraphEdgeCollection API_ extends the
-[_Collection API_](../Collection/README.md) with the following methods.
-
-## graphEdgeCollection.remove
-
-`async graphEdgeCollection.remove(documentHandle): Object`
-
-Deletes the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the edge to remove. This can be either the `_id` or the `_key`
-  of an edge in the collection, or an edge (i.e. an object with an `_id` or
-  `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-await collection.remove("some-key");
-// document 'edges/some-key' no longer exists
-
-// -- or --
-
-await collection.remove("edges/some-key");
-// document 'edges/some-key' no longer exists
-```
-
-## graphEdgeCollection.documentExists
-
-`async graphEdgeCollection.documentExists(documentHandle): boolean`
-
-Checks whether the edge with the given _documentHandle_ exists.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the edge to retrieve. This can be either the `_id` or the
-  `_key` of an edge in the collection, or an edge (i.e. an object with an
-  `_id` or `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-const exists = await collection.documentExists("some-key");
-if (exists === false) {
-  // the edge does not exist
-}
-```
-
-## graphEdgeCollection.document
-
-`async graphEdgeCollection.document(documentHandle, [opts]): Object`
-
-Alias: `graphEdgeCollection.edge`.
-
-Retrieves the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the edge to retrieve. This can be either the `_id` or the `_key`
-  of an edge in the collection, or an edge (i.e. an object with an `_id` or
-  `_key` property).
-
-- **opts**: `Object` (optional)
-
-  If _opts_ is set, it must be an object with any of the following properties:
-
-  - **graceful**: `boolean` (Default: `false`)
-
-    If set to `true`, the method will return `null` instead of throwing an
-    error if the edge does not exist.
-
-  - **allowDirtyRead**: `boolean` (Default: `false`)
-
-    {% hint 'info' %}
-    This option is only available when targeting ArangoDB 3.4 or later,
-    see [Compatibility](../../GettingStarted/README.md#compatibility).
-    {% endhint %}
-
-    If set to `true`, the request will explicitly permit ArangoDB to return a
-    potentially dirty or stale result and arangojs will load balance the
-    request without distinguishing between leaders and followers.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _graceful_ option.
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-const edge = await collection.document("some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("edges/some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("some-key", true);
-if (edge === null) {
-  // the edge does not exist
-}
-```
-
-## graphEdgeCollection.save
-
-`async graphEdgeCollection.save(data, [fromId, toId]): Object`
-
-Creates a new edge between the vertices _fromId_ and _toId_ with the given
-_data_.
-
-**Arguments**
-
-- **data**: `Object`
-
-  The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_
-  needs to contain the properties `_from` and `_to`.
-
-- **fromId**: `string` (optional)
-
-  The handle of the start vertex of this edge. This can be either the `_id` of a
-  document in the database, the `_key` of an edge in the collection, or a
-  document (i.e. an object with an `_id` or `_key` property).
-
-- **toId**: `string` (optional)
-
-  The handle of the end vertex of this edge.
This can be either the `_id` of a - document in the database, the `_key` of an edge in the collection, or a - document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -const edge = await collection.save( - { some: "data" }, - "vertices/start-vertex", - "vertices/end-vertex" -); -assert.equal(edge._id, "edges/" + edge._key); -assert.equal(edge.some, "data"); -assert.equal(edge._from, "vertices/start-vertex"); -assert.equal(edge._to, "vertices/end-vertex"); -``` - -## graphEdgeCollection.edges - -`async graphEdgeCollection.edges(documentHandle): Array` - -Retrieves a list of all edges of the document with the given _documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.edges("vertices/a"); -assert.equal(edges.length, 3); -assert.deepEqual(edges.map(edge => edge._key), ["x", "y", "z"]); -``` - -## graphEdgeCollection.inEdges - -`async graphEdgeCollection.inEdges(documentHandle): Array` - -Retrieves a list of all incoming edges of the document with the given -_documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.inEdges("vertices/a"); -assert.equal(edges.length, 1); -assert.equal(edges[0]._key, "z"); -``` - -## graphEdgeCollection.outEdges - -`async graphEdgeCollection.outEdges(documentHandle): Array` - -Retrieves a list of all outgoing edges of the document with the given -_documentHandle_. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/a", "vertices/c"], - ["z", "vertices/d", "vertices/a"] -]); -const edges = await collection.outEdges("vertices/a"); -assert.equal(edges.length, 2); -assert.deepEqual(edges.map(edge => edge._key), ["x", "y"]); -``` - -## graphEdgeCollection.traversal - -`async graphEdgeCollection.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in this edge collection. - -**Arguments** - -- **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the database, the `_key` of an edge in the collection, or a document (i.e. an - object with an `_id` or `_key` property). - -- **opts**: `Object` - - See - [the HTTP API documentation](../../../..//HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript code, it's not possible to pass in JavaScript functions directly - because the code needs to be evaluated on the server and will be transmitted - in plain text. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -await collection.import([ - ["_key", "_from", "_to"], - ["x", "vertices/a", "vertices/b"], - ["y", "vertices/b", "vertices/c"], - ["z", "vertices/c", "vertices/d"] -]); -const result = await collection.traversal("vertices/a", { - direction: "outbound", - visitor: "result.vertices.push(vertex._key);", - init: "result.vertices = [];" -}); -assert.deepEqual(result.vertices, ["a", "b", "c", "d"]); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md b/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md deleted file mode 100644 index 5147a716bd4a..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md +++ /dev/null @@ -1,165 +0,0 @@ - -# Manipulating edges - -## graph.edgeCollection - -`graph.edgeCollection(collectionName): GraphEdgeCollection` - -Returns a new [_GraphEdgeCollection_ instance](EdgeCollection.md) with -the given name bound to this graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges" and "vertices" exist -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -assert.equal(collection.name, "edges"); -// collection is a GraphEdgeCollection -``` - -## graph.addEdgeDefinition - -`async graph.addEdgeDefinition(definition): Object` - -Adds the given edge definition _definition_ to the graph. - -**Arguments** - -* **definition**: `Object` - - For more information on edge definitions see - [the HTTP API for managing graphs](../../../..//HTTP/Gharial/Management.html). 
- -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges" and "vertices" exist -const graph = db.graph('some-graph'); -await graph.addEdgeDefinition({ - collection: 'edges', - from: ['vertices'], - to: ['vertices'] -}); -// the edge definition has been added to the graph -``` - -## graph.replaceEdgeDefinition - -`async graph.replaceEdgeDefinition(collectionName, definition): Object` - -Replaces the edge definition for the edge collection named _collectionName_ with -the given _definition_. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection to replace the definition of. - -* **definition**: `Object` - - For more information on edge definitions see - [the HTTP API for managing graphs](../../../..//HTTP/Gharial/Management.html). - -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges", "vertices" and "more-vertices" exist -const graph = db.graph('some-graph'); -await graph.replaceEdgeDefinition('edges', { - collection: 'edges', - from: ['vertices'], - to: ['more-vertices'] -}); -// the edge definition has been modified -``` - -## graph.removeEdgeDefinition - -`async graph.removeEdgeDefinition(definitionName, [dropCollection]): Object` - -Removes the edge definition with the given _definitionName_ form the graph. - -**Arguments** - -* **definitionName**: `string` - - Name of the edge definition to remove from the graph. - -* **dropCollection**: `boolean` (optional) - - If set to `true`, the edge collection associated with the definition will also - be deleted from the database. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); - -await graph.removeEdgeDefinition('edges') -// the edge definition has been removed - -// -- or -- - -await graph.removeEdgeDefinition('edges', true) -// the edge definition has been removed -// and the edge collection "edges" has been dropped -// this may have been a bad idea -``` - -## graph.traversal - -`async graph.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in any of the edge collections of this graph. - -**Arguments** - -* **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the graph or a document (i.e. an object with an `_id` property). - -* **opts**: `Object` - - See - [the HTTP API documentation](../../../..//HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript functions, it's not possible to pass in JavaScript functions - directly because the functions need to be evaluated on the server and will be - transmitted in plain text. 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/b', 'vertices/c'], - ['z', 'vertices/c', 'vertices/d'] -]) -const result = await graph.traversal('vertices/a', { - direction: 'outbound', - visitor: 'result.vertices.push(vertex._key);', - init: 'result.vertices = [];' -}); -assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/README.md b/Documentation/Books/Drivers/JS/Reference/Graph/README.md deleted file mode 100644 index b5bbf962c3c8..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Graph/README.md +++ /dev/null @@ -1,86 +0,0 @@ - -# Graph API - -These functions implement the -[HTTP API for manipulating graphs](../../../..//HTTP/Gharial/index.html). - -## graph.exists - -`async graph.exists(): boolean` - -Checks whether the graph exists. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const result = await graph.exists(); -// result indicates whether the graph exists -``` - -## graph.get - -`async graph.get(): Object` - -Retrieves general information about the graph. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const data = await graph.get(); -// data contains general information about the graph -``` - -## graph.create - -`async graph.create(properties): Object` - -Creates a graph with the given _properties_ for this graph's name, then returns -the server response. - -**Arguments** - -- **properties**: `Object` - - For more information on the _properties_ object, see - [the HTTP API documentation for creating graphs](../../../..//HTTP/Gharial/Management.html). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const info = await graph.create({ - edgeDefinitions: [{ - collection: 'edges', - from: ['start-vertices'], - to: ['end-vertices'] - }] -}); -// graph now exists -``` - -## graph.drop - -`async graph.drop([dropCollections]): Object` - -Deletes the graph from the database. - -**Arguments** - -- **dropCollections**: `boolean` (optional) - - If set to `true`, the collections associated with the graph will also be - deleted. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.drop(); -// the graph "some-graph" no longer exists -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md deleted file mode 100644 index fd1e38ec7b9e..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md +++ /dev/null @@ -1,148 +0,0 @@ - -# GraphVertexCollection API - -The _GraphVertexCollection API_ extends the -[_Collection API_](../Collection/README.md) with the following methods. - -## graphVertexCollection.remove - -`async graphVertexCollection.remove(documentHandle): Object` - -Deletes the vertex with the given _documentHandle_ from the collection. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the vertex to retrieve. This can be either the `_id` or the - `_key` of a vertex in the collection, or a vertex (i.e. an object with an - `_id` or `_key` property). 
- -**Examples** - -```js -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); - -await collection.remove("some-key"); -// document 'vertices/some-key' no longer exists - -// -- or -- - -await collection.remove("vertices/some-key"); -// document 'vertices/some-key' no longer exists -``` - -## graphVertexCollection.documentExists - -`async graphVertexCollection.documentExists(documentHandle): boolean` - -Checks whether the vertex with the given _documentHandle_ exists. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the vertex to retrieve. This can be either the `_id` or the - `_key` of a vertex in the collection, or a vertex (i.e. an object with an - `_id` or `_key` property). - -**Examples** - -```js -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); - -const exists = await collection.documentExists("some-key"); -if (exists === false) { - // the vertex does not exist -} -``` - -## graphVertexCollection.document - -`async graphVertexCollection.document(documentHandle, [graceful]): Object` - -Alias: `graphVertexCollection.vertex`. - -Retrieves the vertex with the given _documentHandle_ from the collection. - -**Arguments** - -- **documentHandle**: `string` - - The handle of the vertex to retrieve. This can be either the `_id` or the - `_key` of a vertex in the collection, or a vertex (i.e. an object with an - `_id` or `_key` property). - -- **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - - **graceful**: `boolean` (Default: `false`) - - If set to `true`, the method will return `null` instead of throwing an - error if the vertex does not exist. - - - **allowDirtyRead**: `boolean` (Default: `false`) - - {% hint 'info' %} - This option is only available when targeting ArangoDB 3.4 or later, - see [Compatibility](../../GettingStarted/README.md#compatibility). - {% endhint %} - - If set to `true`, the request will explicitly permit ArangoDB to return a - potentially dirty or stale result and arangojs will load balance the - request without distinguishing between leaders and followers. - -If a boolean is passed instead of an options object, it will be interpreted as -the _graceful_ option. - -**Examples** - -```js -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); - -const doc = await collection.document("some-key"); -// the vertex exists -assert.equal(doc._key, "some-key"); -assert.equal(doc._id, "vertices/some-key"); - -// -- or -- - -const doc = await collection.document("vertices/some-key"); -// the vertex exists -assert.equal(doc._key, "some-key"); -assert.equal(doc._id, "vertices/some-key"); - -// -- or -- - -const doc = await collection.vertex("some-key", true); -if (doc === null) { - // the vertex does not exist -} -``` - -## graphVertexCollection.save - -`async graphVertexCollection.save(data): Object` - -Creates a new vertex with the given _data_. - -**Arguments** - -- **data**: `Object` - - The data of the vertex. 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); -const doc = await collection.save({ some: "data" }); -assert.equal(doc._id, "vertices/" + doc._key); -assert.equal(doc.some, "data"); -``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md b/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md deleted file mode 100644 index 69d39ee64959..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md +++ /dev/null @@ -1,135 +0,0 @@ - -# Manipulating vertices - -## graph.vertexCollection - -`graph.vertexCollection(collectionName): GraphVertexCollection` - -Returns a new [_GraphVertexCollection_ instance](VertexCollection.md) -with the given name for this graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); -assert.equal(collection.name, "vertices"); -// collection is a GraphVertexCollection -``` - -## graph.listVertexCollections - -`async graph.listVertexCollections([excludeOrphans]): Array` - -Fetches all vertex collections from the graph and returns an array of collection descriptions. - -**Arguments** - -* **excludeOrphans**: `boolean` (Default: `false`) - - Whether orphan collections should be excluded. - -**Examples** - -```js -const graph = db.graph('some-graph'); - -const collections = await graph.listVertexCollections(); -// collections is an array of collection descriptions -// including orphan collections - -// -- or -- - -const collections = await graph.listVertexCollections(true); -// collections is an array of collection descriptions -// not including orphan collections -``` - -## graph.vertexCollections - -`async graph.vertexCollections([excludeOrphans]): Array` - -Fetches all vertex collections from the database and returns an array of _GraphVertexCollection_ instances for the collections. - -**Arguments** - -* **excludeOrphans**: `boolean` (Default: `false`) - - Whether orphan collections should be excluded. - -**Examples** - -```js -const graph = db.graph('some-graph'); - -const collections = await graph.vertexCollections() -// collections is an array of GraphVertexCollection -// instances including orphan collections - -// -- or -- - -const collections = await graph.vertexCollections(true) -// collections is an array of GraphVertexCollection -// instances not including orphan collections -``` - -## graph.addVertexCollection - -`async graph.addVertexCollection(collectionName): Object` - -Adds the collection with the given _collectionName_ to the graph's vertex -collections. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection to add to the graph. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.addVertexCollection('vertices'); -// the collection "vertices" has been added to the graph -``` - -## graph.removeVertexCollection - -`async graph.removeVertexCollection(collectionName, [dropCollection]): Object` - -Removes the vertex collection with the given _collectionName_ from the graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection to remove from the graph. - -* **dropCollection**: `boolean` (optional) - - If set to `true`, the collection will also be deleted from the database. 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.removeVertexCollection('vertices') -// collection "vertices" has been removed from the graph - -// -- or -- - -await graph.removeVertexCollection('vertices', true) -// collection "vertices" has been removed from the graph -// the collection has also been dropped from the database -// this may have been a bad idea -``` diff --git a/Documentation/Books/Drivers/JS/Reference/README.md b/Documentation/Books/Drivers/JS/Reference/README.md deleted file mode 100644 index f64a9048654b..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/README.md +++ /dev/null @@ -1,30 +0,0 @@ - -# ArangoDB JavaScript Driver - Reference - -- [Database](Database/README.md) - - [Database Manipulation](Database/DatabaseManipulation.md) - - [Collection Access](Database/CollectionAccess.md) - - [View Access](Database/ViewAccess.md) - - [Queries](Database/Queries.md) - - [AQL User Functions](Database/AqlUserFunctions.md) - - [Transactions](Database/Transactions.md) - - [Graph Access](Database/GraphAccess.md) - - [Foxx Services](Database/FoxxServices.md) - - [HTTP Routes](Database/HttpRoutes.md) -- [Collection](Collection/README.md) - - [Collection Manipulation](Collection/CollectionManipulation.md) - - [Document Manipulation](Collection/DocumentManipulation.md) - - [DocumentCollection](Collection/DocumentCollection.md) - - [EdgeCollection](Collection/EdgeCollection.md) - - [Indexes](Collection/Indexes.md) - - [Simple Queries](Collection/SimpleQueries.md) - - [Bulk Import](Collection/BulkImport.md) -- [AQL Helpers](Aql.md) -- [View Manipulation](ViewManipulation.md) -- [Cursor](Cursor.md) -- [Graph](Graph/README.md) - - [Vertices](Graph/Vertices.md) - - [Edges](Graph/Edges.md) - - [VertexCollection](Graph/VertexCollection.md) - - [EdgeCollection](Graph/EdgeCollection.md) -- [Route](Route.md) diff --git a/Documentation/Books/Drivers/JS/Reference/Route.md b/Documentation/Books/Drivers/JS/Reference/Route.md deleted file mode 100644 index d05f5802d0c2..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/Route.md +++ /dev/null @@ -1,386 +0,0 @@ - -# Route API - -_Route_ instances provide access for arbitrary HTTP requests. This allows easy -access to Foxx services and other HTTP APIs not covered by the driver itself. - -## route.route - -`route.route([path], [headers]): Route` - -Returns a new _Route_ instance for the given path (relative to the current -route) that can be used to perform arbitrary HTTP requests. - -**Arguments** - -- **path**: `string` (optional) - - The relative URL of the route. - -- **headers**: `Object` (optional) - - Default headers that should be sent with each request to the route. - -If _path_ is missing, the route will refer to the base URL of the database. - -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const users = route.route("users"); -// equivalent to db.route('my-foxx-service/users') -``` - -## route.get - -`async route.get([path,] [qs]): Response` - -Performs a GET request to the given URL and returns the server response. - -**Arguments** - -- **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -- **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. 
- -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const response = await route.get(); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service - -// -- or -- - -const response = await route.get("users"); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.get("users", { group: "admin" }); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service/users?group=admin -``` - -## route.post - -`async route.post([path,] [body, [qs]]): Response` - -Performs a POST request to the given URL and returns the server response. - -**Arguments** - -- **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -- **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - -- **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const response = await route.post(); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service - -// -- or -- - -const response = await route.post("users"); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.post("users", { - username: "admin", - password: "hunter2" -}); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users -// with JSON request body {"username": "admin", "password": "hunter2"} - -// -- or -- - -const response = await route.post( - "users", - { - username: "admin", - password: "hunter2" - }, - { admin: true } -); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users?admin=true -// with JSON request body {"username": "admin", "password": "hunter2"} -``` - -## route.put - -`async route.put([path,] [body, [qs]]): Response` - -Performs a PUT request to the given URL and returns the server response. - -**Arguments** - -- **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -- **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - -- **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. 
**Examples**

```js
const db = new Database();
const route = db.route("my-foxx-service");
const response = await route.put();
// response.body is the response body of calling
// PUT _db/_system/my-foxx-service

// -- or --

const response = await route.put("users/admin");
// response.body is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin

// -- or --

const response = await route.put("users/admin", {
  username: "admin",
  password: "hunter2"
});
// response.body is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin
// with JSON request body {"username": "admin", "password": "hunter2"}

// -- or --

const response = await route.put(
  "users/admin",
  {
    username: "admin",
    password: "hunter2"
  },
  { admin: true }
);
// response.body is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin?admin=true
// with JSON request body {"username": "admin", "password": "hunter2"}
```

## route.patch

`async route.patch([path,] [body, [qs]]): Response`

Performs a PATCH request to the given URL and returns the server response.

**Arguments**

- **path**: `string` (optional)

  The route-relative URL for the request. If omitted, the request will be made
  to the base URL of the route.

- **body**: `string` (optional)

  The request body. If _body_ is an object, it will be encoded as JSON.

- **qs**: `string` (optional)

  The query string for the request. If _qs_ is an object, it will be translated
  to a query string.

**Examples**

```js
const db = new Database();
const route = db.route("my-foxx-service");
const response = await route.patch();
// response.body is the response body of calling
// PATCH _db/_system/my-foxx-service

// -- or --

const response = await route.patch("users/admin");
// response.body is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin

// -- or --

const response = await route.patch("users/admin", {
  password: "hunter2"
});
// response.body is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin
// with JSON request body {"password": "hunter2"}

// -- or --

const response = await route.patch(
  "users/admin",
  {
    password: "hunter2"
  },
  { admin: true }
);
// response.body is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin?admin=true
// with JSON request body {"password": "hunter2"}
```

## route.delete

`async route.delete([path,] [qs]): Response`

Performs a DELETE request to the given URL and returns the server response.

**Arguments**

- **path**: `string` (optional)

  The route-relative URL for the request. If omitted, the request will be made
  to the base URL of the route.

- **qs**: `string` (optional)

  The query string for the request. If _qs_ is an object, it will be translated
  to a query string.
- -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const response = await route.delete(); -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service - -// -- or -- - -const response = await route.delete("users/admin"); -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service/users/admin - -// -- or -- - -const response = await route.delete("users/admin", { permanent: true }); -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service/users/admin?permanent=true -``` - -## route.head - -`async route.head([path,] [qs]): Response` - -Performs a HEAD request to the given URL and returns the server response. - -**Arguments** - -- **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -- **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const response = await route.head(); -// response is the response object for -// HEAD _db/_system/my-foxx-service -``` - -## route.request - -`async route.request([opts]): Response` - -Performs an arbitrary request to the given URL and returns the server response. - -**Arguments** - -- **opts**: `Object` (optional) - - An object with any of the following properties: - - - **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - - - **absolutePath**: `boolean` (Default: `false`) - - Whether the _path_ is relative to the connection's base URL instead of the - route. - - - **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - - - **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be - translated to a query string. - - - **headers**: `Object` (optional) - - An object containing additional HTTP headers to be sent with the request. - - - **method**: `string` (Default: `"GET"`) - - HTTP method of this request. - - - **timeout**: `number` (optional) - - A non-negative amount of milliseconds after which the request will be - aborted. Note that ArangoDB may continue processing the request even - after it has timed out. - -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const response = await route.request({ - path: "hello-world", - method: "POST", - body: { hello: "world" }, - qs: { admin: true } -}); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/hello-world?admin=true -// with JSON request body '{"hello": "world"}' -``` diff --git a/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md b/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md deleted file mode 100644 index c7cbad4b634f..000000000000 --- a/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md +++ /dev/null @@ -1,190 +0,0 @@ - -# View API - -These functions implement the -[HTTP API for manipulating views](../../..//HTTP/Views/index.html). - -## view.exists - -`async view.exists(): boolean` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Checks whether the view exists. 
- -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("some-view"); -const result = await view.exists(); -// result indicates whether the view exists -``` - -### view.get - -`async view.get(): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves general information about the view. - -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("some-view"); -const data = await view.get(); -// data contains general information about the view -``` - -### view.properties - -`async view.properties(): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Retrieves the view's properties. - -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("some-view"); -const data = await view.properties(); -// data contains the view's properties -``` - -## view.create - -`async view.create([properties]): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Creates a view with the given _properties_ for this view's name, -then returns the server response. - -**Arguments** - -- **properties**: `Object` (optional) - - For more information on the _properties_ object, see the - [HTTP API documentation for creating views](../../..//HTTP/Views/ArangoSearch.html). - -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("potatoes"); -await view.create(); -// the arangosearch view "potatoes" now exists -``` - -## view.setProperties - -`async view.setProperties(properties): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Updates the properties of the view. - -**Arguments** - -- **properties**: `Object` - - For information on the _properties_ argument see the - [HTTP API for modifying views](../../..//HTTP/Views/ArangoSearch.html). - -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("some-view"); -const result = await view.setProperties({ consolidationIntervalMsec: 123 }); -assert.equal(result.consolidationIntervalMsec, 123); -``` - -## view.replaceProperties - -`async view.replaceProperties(properties): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Replaces the properties of the view. - -**Arguments** - -- **properties**: `Object` - - For information on the _properties_ argument see the - [HTTP API for modifying views](../../..//HTTP/Views/ArangoSearch.html). - -**Examples** - -```js -const db = new Database(); -const view = db.arangoSearchView("some-view"); -const result = await view.replaceProperties({ consolidationIntervalMsec: 234 }); -assert.equal(result.consolidationIntervalMsec, 234); -``` - -## view.rename - -`async view.rename(name): Object` - -{% hint 'info' %} -This method is only available when targeting ArangoDB 3.4 or later, -see [Compatibility](../GettingStarted/README.md#compatibility). -{% endhint %} - -Renames the view. The _View_ instance will automatically update its -name when the rename succeeds. 
**Examples**

```js
const db = new Database();
const view = db.arangoSearchView("some-view");
const result = await view.rename("new-view-name");
assert.equal(result.name, "new-view-name");
assert.equal(view.name, result.name);
// result contains additional information about the view
```

## view.drop

`async view.drop(): Object`

{% hint 'info' %}
This method is only available when targeting ArangoDB 3.4 or later,
see [Compatibility](../GettingStarted/README.md#compatibility).
{% endhint %}

Deletes the view from the database.

**Examples**

```js
const db = new Database();
const view = db.arangoSearchView("some-view");
await view.drop();
// the view "some-view" no longer exists
```
diff --git a/Documentation/Books/Drivers/Java/GettingStarted/README.md b/Documentation/Books/Drivers/Java/GettingStarted/README.md
deleted file mode 100644
index b41c098d0a3b..000000000000
--- a/Documentation/Books/Drivers/Java/GettingStarted/README.md
+++ /dev/null
@@ -1,50 +0,0 @@

# ArangoDB Java Driver - Getting Started

## Supported versions

arangodb-java-driver | ArangoDB | network protocol | Java version
---------------------|--------------|--------------------|-------------
5.x.x+ | 3.0.0+ | VelocyStream, HTTP | 1.6+
4.2.x+ | 3.0.0+ | VelocyStream, HTTP | 1.6+
4.1.x | 3.1.0+ | VelocyStream | 1.6+
3.1.x | 3.1.0+ | HTTP | 1.6+
3.0.x | 3.0.x | HTTP | 1.6+
2.7.4 | 2.7.x, 2.8.x | HTTP | 1.6+

**Note**: VelocyStream is only supported in ArangoDB 3.1 and above.

## Maven

To add the driver to your project with Maven, add the following code to your pom.xml
(please use a driver version compatible with your ArangoDB server's version):

ArangoDB 3.x.x

```XML
<dependencies>
  <dependency>
    <groupId>com.arangodb</groupId>
    <artifactId>arangodb-java-driver</artifactId>
    <version>5.0.0</version>
  </dependency>
</dependencies>
```

If you want to test with a snapshot version (e.g. 4.6.0-SNAPSHOT),
add the staging repository of oss.sonatype.org to your pom.xml:

```XML
<repositories>
  <repository>
    <id>arangodb-snapshots</id>
    <url>https://oss.sonatype.org/content/groups/staging</url>
  </repository>
</repositories>
```

## Compile the Java Driver

```
mvn clean install -DskipTests=true -Dgpg.skip=true -Dmaven.javadoc.skip=true -B
```
diff --git a/Documentation/Books/Drivers/Java/README.md b/Documentation/Books/Drivers/Java/README.md
deleted file mode 100644
index ee7da9e33a28..000000000000
--- a/Documentation/Books/Drivers/Java/README.md
+++ /dev/null
@@ -1,15 +0,0 @@

# ArangoDB Java Driver

The official ArangoDB Java Driver.

- [Getting Started](GettingStarted/README.md)
- [Reference](Reference/README.md)

## See Also

- [ChangeLog](https://raw.githubusercontent.com/arangodb/arangodb-java-driver/master/ChangeLog.md)
- [Examples](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example)
- [Tutorial](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/)
- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_3/index.html)
- [JavaDoc VelocyPack](http://arangodb.github.io/java-velocypack/javadoc-1_0/index.html)
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md b/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md
deleted file mode 100644
index 31ee748f18ac..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md
+++ /dev/null
@@ -1,92 +0,0 @@

# Bulk importing documents

This function implements the
[HTTP API for bulk imports](../../../..//HTTP/BulkImports/index.html).
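
As a quick orientation before the full method reference below, here is a brief sketch of a bulk import that uses non-default options. This is only an illustration, not verbatim driver code: the database name, collection name, and sample data are made up, and it assumes the `OnDuplicate` enum nested in `DocumentImportOptions`.

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoCollection collection = arango.db("myDB").collection("some-collection");

// import a JSON array of documents; on key conflicts, update the existing documents
DocumentImportEntity result = collection.importDocuments(
    "[{\"_key\":\"a\",\"value\":1},{\"_key\":\"b\",\"value\":2}]",
    new DocumentImportOptions().onDuplicate(DocumentImportOptions.OnDuplicate.update)
);
// result reports how many documents were created, updated or ignored
```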
- -## ArangoCollection.importDocuments - -`ArangoCollection.importDocuments(Collection values, DocumentImportOptions options) : DocumentImportEntity` - -`ArangoCollection.importDocuments(String values, DocumentImportOptions options) : DocumentImportEntity` - -Bulk imports the given values into the collection. - -**Arguments** - -- **values**: `Collection` or `String` - - - `Collection`: A list of Objects that will be stored as documents - - - `String`: JSON-encoded array of objects that will be stored as documents - -- **options**: `DocumentImportOptions` - - - **fromPrefix**: `String` - - An optional prefix for the values in \_from attributes. If specified, - the value is automatically prepended to each \_from input value. - This allows specifying just the keys for \_from. - - - **toPrefix**: `String` - - An optional prefix for the values in \_to attributes. If specified, - the value is automatically prepended to each \_to input value. - This allows specifying just the keys for \_to. - - - **overwrite**: `Boolean` - - If this parameter has a value of true, then all data in the collection - will be removed prior to the import. Note that any existing index definitions - will be preserved. - - - **waitForSync**: `Boolean` - - Wait until documents have been synced to disk before returning. - - - **onDuplicate**: `OnDuplicate` - - Controls what action is carried out in case of a unique key constraint violation. - Possible values are: - - - **error**: this will not import the current document because of the - unique key constraint violation. This is the default setting. - - - **update**: this will update an existing document in the database with - the data specified in the request. Attributes of the existing document - that are not present in the request will be preserved. - - - **replace**: this will replace an existing document in the database with - the data specified in the request. - - - **ignore**: this will not update an existing document and simply ignore - the error caused by the unique key constraint violation. Note that update, - replace and ignore will only work when the import document in the request - contains the \_key attribute. update and replace may also fail because of - secondary unique key constraint violations. - - - **complete**: `Boolean` - - If set to true, it will make the whole import fail if any error occurs. - Otherwise the import will continue even if some documents cannot be imported. - - - **details**: `Boolean` - - If set to true, the result will include an attribute details with details - about documents that could not be imported. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument doc1 = new BaseDocument(); -BaseDocument doc2 = new BaseDocument(); -BaseDocument doc3 = new BaseDocument(); -collection.importDocuments( - Arrays.asList(doc1, doc2, doc3), - new DocumentImportOptions() -); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md b/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md deleted file mode 100644 index 60a22266aab0..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md +++ /dev/null @@ -1,356 +0,0 @@ - -# Manipulating the collection - -These functions implement -[the HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html). 
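
Before the individual methods, a minimal sketch of creating a collection with a few of the options documented below (the database and collection names are made up for illustration):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

// 3 shards, each kept on 2 DBServers (leader plus 1 synchronous follower),
// and documents synced to disk before write operations return
db.createCollection("orders", new CollectionCreateOptions()
    .numberOfShards(3)
    .replicationFactor(2)
    .waitForSync(true));
```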
- -## ArangoDatabase.createCollection - -`ArangoDatabase.createCollection(String name, CollectionCreateOptions options) : CollectionEntity` - -Creates a collection with the given _options_ for this collection's name, -then returns collection information from the server. - -**Arguments** - -- **name**: `String` - - The name of the collection - -- **options**: `CollectionCreateOptions` - - - **journalSize**: `Long` - - The maximal size of a journal or datafile in bytes. - The value must be at least 1048576 (1 MiB). - - - **replicationFactor**: `Integer` - - (The default is 1): in a cluster, this attribute determines how many copies - of each shard are kept on different DBServers. The value 1 means that only - one copy (no synchronous replication) is kept. A value of k means that - k-1 replicas are kept. Any two copies reside on different DBServers. - Replication between them is synchronous, that is, every write operation to - the "leader" copy will be replicated to all "follower" replicas, before the - write operation is reported successful. If a server fails, this is detected - automatically and one of the servers holding copies take over, usually - without an error being reported. - - - **satellite**: `Boolean` - - If the true the collection is created as a satellite collection. - In this case the _replicationFactor_ is ignored. - - - **waitForSync**: `Boolean` - - If true then the data is synchronized to disk before returning from a - document create, update, replace or removal operation. (default: false) - - - **doCompact**: `Boolean` - - Whether or not the collection will be compacted (default is true) - - - **isVolatile**: `Boolean` - - If true then the collection data is kept in-memory only and not made persistent. - Unloading the collection will cause the collection data to be discarded. - Stopping or re-starting the server will also cause full loss of data in - the collection. Setting this option will make the resulting collection be - slightly faster than regular collections because ArangoDB does not enforce - any synchronization to disk and does not calculate any CRC checksums for - datafiles (as there are no datafiles). This option should therefore be used - for cache-type collections only, and not for data that cannot be re-created - otherwise. (The default is false) - - - **shardKeys**: `String...` - - (The default is [ "_key" ]): in a cluster, this attribute determines which - document attributes are used to determine the target shard for documents. - Documents are sent to shards based on the values of their shard key attributes. - The values of all shard key attributes in a document are hashed, and the - hash value is used to determine the target shard. Note: Values of shard key - attributes cannot be changed once set. This option is meaningless in a - single server setup. - - - **numberOfShards**: `Integer` - - (The default is 1): in a cluster, this value determines the number of shards - to create for the collection. In a single server setup, this option is meaningless. - - - **isSystem**: `Boolean` - - If true, create a system collection. In this case collection-name should - start with an underscore. End users should normally create non-system - collections only. API implementors may be required to create system collections - in very special occasions, but normally a regular collection will do. - (The default is false) - - - **type**: `CollectionType` - - (The default is _CollectionType#DOCUMENT_): the type of the collection to create. 
- - - **indexBuckets**: `Integer` - - The number of buckets into which indexes using a hash table are split. - The default is 16 and this number has to be a power of 2 and less than or - equal to 1024. For very large collections one should increase this to avoid - long pauses when the hash table has to be initially built or resized, since - buckets are resized individually and can be initially built in parallel. - For example, 64 might be a sensible value for a collection with - 100 000 000 documents. Currently, only the edge index respects this value, - but other index types might follow in future ArangoDB versions. - Changes (see below) are applied when the collection is loaded the next time. - - - **distributeShardsLike**: `String` - - (The default is ""): in an Enterprise Edition cluster, this attribute binds - the specifics of sharding for the newly created collection to follow that - of a specified existing collection. Note: Using this parameter has - consequences for the prototype collection. It can no longer be dropped, - before sharding imitating collections are dropped. Equally, backups and - restores of imitating collections alone will generate warnings, which can - be overridden, about missing sharding prototype. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.createCollection("potatoes", new CollectionCreateOptions()); -// the document collection "potatoes" now exists -``` - -## ArangoCollection.create - -`ArangoCollection.create(CollectionCreateOptions options) : CollectionEntity` - -Creates a collection with the given _options_ for this collection's name, -then returns collection information from the server. - -Alternative for [ArangoDatabase.createCollection](#arangodatabasecreatecollection). - -**Arguments** - -- **options**: `CollectionCreateOptions` - - - **journalSize**: `Long` - - The maximal size of a journal or datafile in bytes. - The value must be at least 1048576 (1 MiB). - - - **replicationFactor**: `Integer` - - (The default is 1): in a cluster, this attribute determines how many copies - of each shard are kept on different DBServers. The value 1 means that only - one copy (no synchronous replication) is kept. A value of k means that k-1 - replicas are kept. Any two copies reside on different DBServers. - Replication between them is synchronous, that is, every write operation to - the "leader" copy will be replicated to all "follower" replicas, before the - write operation is reported successful. If a server fails, this is detected - automatically and one of the servers holding copies take over, usually - without an error being reported. - - - **satellite**: `Boolean` - - If the true the collection is created as a satellite collection. - In this case the _replicationFactor_ is ignored. - - - **waitForSync**: `Boolean` - - If true then the data is synchronized to disk before returning from a - document create, update, replace or removal operation. (default: false) - - - **doCompact**: `Boolean` - - Whether or not the collection will be compacted (default is true) - - - **isVolatile**: `Boolean` - - If true then the collection data is kept in-memory only and not made persistent. - Unloading the collection will cause the collection data to be discarded. - Stopping or re-starting the server will also cause full loss of data in - the collection. 
Setting this option will make the resulting collection be - slightly faster than regular collections because ArangoDB does not enforce - any synchronization to disk and does not calculate any CRC checksums for - datafiles (as there are no datafiles). This option should therefore be used - for cache-type collections only, and not for data that cannot be re-created - otherwise. (The default is false) - - - **shardKeys**: `String...` - - (The default is [ "_key" ]): in a cluster, this attribute determines which - document attributes are used to determine the target shard for documents. - Documents are sent to shards based on the values of their shard key attributes. - The values of all shard key attributes in a document are hashed, and the - hash value is used to determine the target shard. Note: Values of shard key - attributes cannot be changed once set. This option is meaningless in a - single server setup. - - - **numberOfShards**: `Integer` - - (The default is 1): in a cluster, this value determines the number of shards - to create for the collection. In a single server setup, this option is meaningless. - - - **isSystem**: `Boolean` - - If true, create a system collection. In this case collection-name should - start with an underscore. End users should normally create non-system - collections only. API implementors may be required to create system collections - in very special occasions, but normally a regular collection will do. - (The default is false) - - - **type**: `CollectionType` - - (The default is _CollectionType#DOCUMENT_): the type of the collection to create. - - - **indexBuckets**: `Integer` - - The number of buckets into which indexes using a hash table are split. - The default is 16 and this number has to be a power of 2 and less than or - equal to 1024. For very large collections one should increase this to avoid - long pauses when the hash table has to be initially built or resized, since - buckets are resized individually and can be initially built in parallel. - For example, 64 might be a sensible value for a collection with - 100 000 000 documents. Currently, only the edge index respects this value, - but other index types might follow in future ArangoDB versions. - Changes (see below) are applied when the collection is loaded the next time. - - - **distributeShardsLike**: `String` - - (The default is ""): in an Enterprise Edition cluster, this attribute binds - the specifics of sharding for the newly created collection to follow that - of a specified existing collection. Note: Using this parameter has - consequences for the prototype collection. It can no longer be dropped, - before sharding imitating collections are dropped. Equally, backups and - restores of imitating collections alone will generate warnings, which can - be overridden, about missing sharding prototype. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); -collection.create(new CollectionCreateOptions()); -// the document collection "potatoes" now exists -``` - -## ArangoCollection.load - -`ArangoCollection.load() : CollectionEntity` - -Tells the server to load the collection into memory. 
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); -collection.load(); -// the collection has now been loaded into memory -``` - -## ArangoCollection.unload - -`ArangoCollection.unload() : CollectionEntity` - -Tells the server to remove the collection from memory. This call does not -delete any documents. You can use the collection afterwards; in which case -it will be loaded into memory, again. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); -collection.unload(); -// the collection has now been unloaded from memory -``` - -## ArangoCollection.changeProperties - -`ArangoCollection.changeProperties(CollectionPropertiesOptions options) : CollectionPropertiesEntity` - -Changes the properties of the collection. - -**Arguments** - -- **options**: `CollectionPropertiesEntity` - - For information on the _properties_ argument see - [the HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html). - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -CollectionPropertiesEntity result = collection.changeProperties( - new CollectionPropertiesEntity().waitForSync(true) -); -assertThat(result.getWaitForSync(), is(true)); -// the collection will now wait for data being written to disk -// whenever a document is changed -``` - -## ArangoCollection.rename - -`ArangoCollection.rename(String newName) : CollectionEntity` - -Renames the collection - -**Arguments** - -- **newName**: `String` - - The new name - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -CollectionEntity result = collection.rename("new-collection-name") -assertThat(result.getName(), is("new-collection-name"); -// result contains additional information about the collection -``` - -## ArangoCollection.truncate - -`ArangoCollection.truncate() : CollectionEntity` - -Removes all documents from the collection, but leaves the indexes intact. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -collection.truncate(); -// the collection "some-collection" is now empty -``` - -## ArangoCollection.drop - -`ArangoCollection.drop() : void` - -Deletes the collection from the database. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -collection.drop(); -// the collection "some-collection" no longer exists -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md b/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md deleted file mode 100644 index 496f89eb34dc..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md +++ /dev/null @@ -1,522 +0,0 @@ - -# Manipulating documents - -These functions implement the -[HTTP API for manipulating documents](../../../..//HTTP/Document/index.html). 
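
Before the individual methods, a compact create/read/update/delete round trip as a sketch (the database, collection, key, and attribute names are made up for illustration):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoCollection collection = arango.db("myDB").collection("some-collection");

BaseDocument doc = new BaseDocument("example-key");
doc.addAttribute("status", "new");
collection.insertDocument(doc);                         // create

BaseDocument read = collection.getDocument("example-key", BaseDocument.class); // read
read.updateAttribute("status", "done");
collection.updateDocument("example-key", read);         // update

collection.deleteDocument("example-key");               // delete
```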
- -## ArangoCollection.documentExists - -`ArangoCollection.documentExists(String key) : Boolean` - -Checks if the document exists by reading a single document head - -**Arguments** - -- **key**: `String` - - The key of the document - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -Boolean exists = collection.documentExists("some-key"); -``` - -## ArangoCollection.getDocument - -`ArangoCollection.getDocument(String key, Class type, DocumentReadOptions options) : T` - -Retrieves the document with the given \_key from the collection. - -**Arguments** - -- **key**: `String` - - The key of the document - -- **type**: `Class` - - The type of the document (POJO class, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentReadOptions` - - - **ifNoneMatch**: `String` - - Document revision must not contain If-None-Match - - - **ifMatch**: `String` - - Document revision must contain If-Match - - - **catchException**: `Boolean` - - Whether or not catch possible thrown exceptions - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument document = collection.getDocument("some-key", BaseDocument.class); -``` - -## ArangoCollection.getDocuments - -`ArangoCollection.getDocuments(Collection keys, Class type) : MultiDocumentEntity` - -Retrieves multiple documents with the given \_key from the collection. - -**Arguments** - -- **keys**: `Collection` - - The key of the document - -- **type**: `Class` - - The type of the document (POJO class, `VPackSlice` or `String` for JSON) - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -Collection keys = Arrays.asList("some-key", "some-other-key"); -MultiDocumentEntity documents = collection.getDocuments(keys, BaseDocument.class); -``` - -## ArangoCollection.insertDocument - -`ArangoCollection.insertDocument(T value, DocumentCreateOptions options) : DocumentCreateEntity` - -Creates a new document from the given document, unless there is already a -document with the \_key given. If no \_key is given, a new unique \_key is -generated automatically. - -**Arguments** - -- **value**: `T` - - A representation of a single document (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - This options requires ArangoDB version 3.4.0 or higher. Additionally return - the complete old document under the attribute old in the result. - Only available if the _overwrite_ option is used. - - - **overwrite**: `Boolean` - - This options requires ArangoDB version 3.4.0 or higher. If set to true, the - insert becomes a replace-insert. If a document with the same \_key already - exists the new document is not rejected with unique constraint violated but - will replace the old document. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. 
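
To illustrate the _overwrite_ and _returnNew_ options described above, a sketch (requires ArangoDB 3.4 or higher; the database, collection, and attribute names are made up for illustration):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoCollection collection = arango.db("myDB").collection("some-collection");

BaseDocument doc = new BaseDocument("config");
doc.addAttribute("retries", 3);

// replace-insert: if "config" already exists, it is replaced instead of rejected
DocumentCreateEntity<BaseDocument> info = collection.insertDocument(
    doc,
    new DocumentCreateOptions().overwrite(true).returnNew(true)
);
BaseDocument stored = info.getNew(); // the complete new document, available because of returnNew(true)
```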
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument document = new BaseDocument(); -document.addAttribute("some", "data"); -collection.insertDocument(document, new DocumentCreateOptions()); -``` - -## ArangoCollection.insertDocuments - -`ArangoCollection.insertDocuments(Collection values, DocumentCreateOptions options) : MultiDocumentEntity>` - -Creates new documents from the given documents, unless there is already a -document with the \_key given. If no \_key is given, a new unique \_key is -generated automatically. - -**Arguments** - -- **values**: `Collection` - - A List of documents (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - This options requires ArangoDB version 3.4.0 or higher. Additionally return - the complete old document under the attribute old in the result. - Only available if the _overwrite_ option is used. - - - **overwrite**: `Boolean` - - This options requires ArangoDB version 3.4.0 or higher. If set to true, the - insert becomes a replace-insert. If a document with the same \_key already - exists the new document is not rejected with unique constraint violated but - will replace the old document. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument doc1 = new BaseDocument(); -BaseDocument doc2 = new BaseDocument(); -BaseDocument doc3 = new BaseDocument(); -collection.insertDocuments( - Arrays.asList(doc1, doc2, doc3), - new DocumentCreateOptions() -); -``` - -## ArangoCollection.replaceDocument - -`ArangoCollection.replaceDocument(String key, T value, DocumentReplaceOptions options) : DocumentUpdateEntity` - -Replaces the document with _key_ with the one in the body, provided there is -such a document and no precondition is violated. - -**Arguments** - -- **key**: `String` - - The key of the document - -- **value**: `T` - - A representation of a single document (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given - document is ignored. If this is set to false, then the \_rev attribute - given in the body document is taken as a precondition. The document is - only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. 
This option can be used to save - some network traffic. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument document = new BaseDocument(); -document.addAttribute("hello", "world"); -DocumentCreateEntity info = collection.insertDocument(document); - -document.addAttribute("hello", "world2"); -collection.replaceDocument(info.getKey(), document, new DocumentReplaceOptions()); - -BaseDocument doc = collection.getDocument(info.getKey()); -assertThat(doc.getAttribute("hello"), is("world2")); -``` - -## ArangoCollection.replaceDocuments - -`ArangoCollection.replaceDocuments(Collection values, DocumentReplaceOptions options) : MultiDocumentEntity>` - -Replaces multiple documents in the specified collection with the ones in the -values, the replaced documents are specified by the \_key attributes in the -documents in values. - -**Arguments** - -- **values**: `Collection` - - A List of documents (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given - document is ignored. If this is set to false, then the \_rev attribute - given in the body document is taken as a precondition. The document is - only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument doc1 = new BaseDocument(); -BaseDocument doc2 = new BaseDocument(); -BaseDocument doc3 = new BaseDocument(); -collection.insertDocuments(Arrays.asList(doc1, doc2, doc3)); - -// change values of doc1, doc2, doc3 - -collection.replaceDocuments( - Arrays.asList(doc1, doc2, doc3), - new DocumentReplaceOptions() -); -``` - -## ArangoCollection.updateDocument - -`ArangoCollection.updateDocument(String key, T value, DocumentUpdateOptions options) : DocumentUpdateEntity` - -Updates the document with _key_ with the one in the body, provided there is -such a document and no precondition is violated. - -**Arguments** - -- **key**: `String` - - The key of the document - -- **value**: `T` - - A representation of a single document (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given - document is ignored. If this is set to false, then the \_rev attribute - given in the body document is taken as a precondition. The document is - only replaced if the current revision is the one specified. 
- - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument document = new BaseDocument(); -document.addAttribute("hello", "world"); -DocumentCreateEntity info = collection.insertDocument(document); - -document.addAttribute("hello", "world2"); -collection.updateDocument(info.getKey(), document, new DocumentUpdateOptions()); - -BaseDocument doc = collection.getDocument(info.getKey()); -assertThat(doc.getAttribute("hello"), is("world2")); -``` - -## ArangoCollection.updateDocuments - -`ArangoCollection.updateDocuments(Collection values, DocumentUpdateOptions options) : MultiDocumentEntity>` - -Updates multiple documents in the specified collection with the ones in the -values, the replaced documents are specified by the \_key attributes in the -documents in values. - -**Arguments** - -- **values**: `Collection` - - A List of documents (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given - document is ignored. If this is set to false, then the \_rev attribute - given in the body document is taken as a precondition. The document is - only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -BaseDocument doc1 = new BaseDocument(); -BaseDocument doc2 = new BaseDocument(); -BaseDocument doc3 = new BaseDocument(); -collection.insertDocuments(Arrays.asList(doc1, doc2, doc3)); - -// change values of doc1, doc2, doc3 - -collection.updateDocuments( - Arrays.asList(doc1, doc2, doc3), - new DocumentUpdateOptions() -); -``` - -## ArangoCollection.deleteDocument - -`ArangoCollection.deleteDocument(String key) : DocumentDeleteEntity` - -Deletes the document with the given _key_ from the collection. 
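For illustration, a minimal sketch of using the returned metadata (this assumes the `DocumentDeleteEntity` named in the signature above exposes the removed document's key via `getKey()`, as the other entity types do; the `Void` type parameter is likewise an assumption):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoCollection collection = arango.db("myDB").collection("some-collection");

// assumption: the result entity carries the key of the deleted document
DocumentDeleteEntity<Void> result = collection.deleteDocument("some-key");
System.out.println(result.getKey()); // "some-key"
```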
-
-**Arguments**:
-
-- **key**: `String`
-
-  The key of the document
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument("some-key");
-collection.insertDocument(document);
-
-collection.deleteDocument("some-key");
-// document 'some-collection/some-key' no longer exists
-
-Boolean exists = collection.documentExists("some-key");
-assertThat(exists, is(false));
-```
-
-## ArangoCollection.deleteDocuments
-
-`ArangoCollection.deleteDocuments(Collection<?> values) : MultiDocumentEntity<DocumentDeleteEntity<Void>>`
-
-Deletes multiple documents from the collection.
-
-**Arguments**:
-
-- **values**: `Collection`
-
-  The keys of the documents or the documents themselves
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-collection.deleteDocuments(Arrays.asList("some-key", "some-other-key"));
-// documents 'some-collection/some-key' and 'some-collection/some-other-key' no longer exist
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md b/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md
deleted file mode 100644
index c17d118726f4..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md
+++ /dev/null
@@ -1,240 +0,0 @@
-
-# Manipulating indexes
-
-These functions implement the
-[HTTP API for manipulating indexes](../../../..//HTTP/Indexes/index.html).
-
-## ArangoCollection.ensureHashIndex
-
-`ArangoCollection.ensureHashIndex(Iterable fields, HashIndexOptions options) : IndexEntity`
-
-Creates a hash index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable`
-
-  A list of attribute paths
-
-- **options**: `HashIndexOptions`
-
-  - **unique**: `Boolean`
-
-    If true, then create a unique index
-
-  - **sparse**: `Boolean`
-
-    If true, then create a sparse index
-
-  - **deduplicate**: `Boolean`
-
-    If false, the de-duplication of array values is turned off.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensureHashIndex(Arrays.asList("a", "b.c"), new HashIndexOptions());
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.ensureSkipListIndex
-
-`ArangoCollection.ensureSkipListIndex(Iterable fields, SkipListIndexOptions options) : IndexEntity`
-
-Creates a skip-list index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable`
-
-  A list of attribute paths
-
-- **options**: `SkipListIndexOptions`
-
-  - **unique**: `Boolean`
-
-    If true, then create a unique index
-
-  - **sparse**: `Boolean`
-
-    If true, then create a sparse index
-
-  - **deduplicate**: `Boolean`
-
-    If false, the de-duplication of array values is turned off.
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -IndexEntity index = collection.ensureSkipListIndex( - Arrays.asList("a", "b.c") -); -// the index has been created with the handle `index.getId()` -``` - -## ArangoCollection.ensureGeoIndex - -`ArangoCollection.ensureGeoIndex(Iterable fields, GeoIndexOptions options) : IndexEntity` - -Creates a geo index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `GeoIndexOptions` - - - **geoJson**: `Boolean` - - If a geo-spatial index on a location is constructed and geoJson is true, - then the order within the array is longitude followed by latitude. - This corresponds to the format described in. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -IndexEntity index = collection.ensureGeoIndex( - Arrays.asList("latitude", "longitude") -); -// the index has been created with the handle `index.getId()` -``` - -## ArangoCollection.ensureFulltextIndex - -`ArangoCollection.ensureFulltextIndex(Iterable fields, FulltextIndexOptions options) : IndexEntity` - -Creates a fulltext index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `FulltextIndexOptions` - - - **minLength**: `Integer` - - Minimum character length of words to index. Will default to a server-defined - value if unspecified. It is thus recommended to set this value explicitly - when creating the index. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -IndexEntity index = collection.ensureFulltextIndex( - Arrays.asList("description") -); -// the index has been created with the handle `index.getId()` -``` - -## ArangoCollection.ensurePersistentIndex - -`ArangoCollection.ensurePersistentIndex(Iterable fields, PersistentIndexOptions options) : IndexEntity` - -Creates a persistent index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `PersistentIndexOptions` - - - **unique**: `Boolean` - - If true, then create a unique index - - - **sparse**: `Boolean` - - If true, then create a sparse index - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -IndexEntity index = collection.ensurePersistentIndex(Arrays.asList("a", "b.c")); -// the index has been created with the handle `index.getId()` -``` - -## ArangoCollection.getIndex - -`ArangoCollection.getIndex(String id) : IndexEntity` - -Fetches information about the index with the given _id_ and returns it. - -**Arguments** - -- **id**: `String` - - The index-handle - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -IndexEntity index = collection.getIndex("some-index"); -``` - -## ArangoCollection.getIndexes - -`ArangoCollection.getIndexes() : Collection` - -Fetches a list of all indexes on this collection. 
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -Collection indexes = collection.getIndexs(); -``` - -## ArangoCollection.deleteIndex - -`ArangoCollection.deleteIndex(String id) : String` - -Deletes the index with the given _id_ from the collection. - -**Arguments** - -- **id**: `String` - - The index-handle - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("some-collection"); - -collection.deleteIndex("some-index"); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/README.md b/Documentation/Books/Drivers/Java/Reference/Collection/README.md deleted file mode 100644 index 3968190d5c6b..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Collection/README.md +++ /dev/null @@ -1,94 +0,0 @@ - -# Collection API - -These functions implement the -[HTTP API for collections](../../../..//HTTP/Collection/index.html). - -The _ArangoCollection_ API is used for all collections, regardless of -their specific type (document/edge collection). - -## Getting information about the collection - -See -[the HTTP API documentation](../../../..//HTTP/Collection/Getting.html) -for details. - -## ArangoCollection.exists - -`ArangoCollection.exists() : boolean` - -Checks whether the collection exists - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); - -boolean exists = collection.exists(); -``` - -## ArangoCollection.getInfo - -`ArangoCollection.getInfo() : CollectionEntity` - -Returns information about the collection. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); - -CollectionEntity info = collection.getInfo(); -``` - -## ArangoCollection.getProperties - -`ArangoCollection.getProperties() : CollectionPropertiesEntity` - -Reads the properties of the specified collection. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); - -CollectionPropertiesEntity properties = collection.getProperties(); -``` - -## ArangoCollection.getRevision - -`ArangoCollection.getRevision() : CollectionRevisionEntity` - -Retrieve the collections revision. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); - -CollectionRevisionEntity revision = collection.getRevision(); -``` - -## ArangoCollection.getIndexes - -`ArangoCollection.getIndexes() : Collection` - -Fetches a list of all indexes on this collection. 
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("potatoes"); - -Collection indexes = collection.getIndexes(); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Cursor.md b/Documentation/Books/Drivers/Java/Reference/Cursor.md deleted file mode 100644 index f00865442900..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Cursor.md +++ /dev/null @@ -1,357 +0,0 @@ - -# Cursor API - -_ArangoCursor_ instances provide an abstraction over the HTTP API's limitations. -Unless a method explicitly exhausts the cursor, the driver will only fetch as -many batches from the server as necessary. Like the server-side cursors, -_ArangoCursor_ instances are incrementally depleted as they are read from. - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoCursor cursor = db.query( - "FOR x IN 1..5 RETURN x", Integer.class -); -// query result list: [1, 2, 3, 4, 5] -Integer value = cursor.next(); -assertThat(value, is(1)); -// remaining result list: [2, 3, 4, 5] -``` - -## ArangoCursor.hasNext - -`ArangoCursor.hasNext() : boolean` - -Returns _true_ if the cursor has more elements in its current batch of results -or the cursor on the server has more batches. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoCursor cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class) -boolean hasNext = cursor.hasNext(); -``` - -## ArangoCursor.next - -`ArangoCursor.next() : T` - -Returns the next element of the query result. If the current element is the last -element of the batch and the cursor on the server provides more batches, the -next batch is fetched from the server. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoCursor cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class) -Integer value = cursor.next(); -assertThat(value, is(1)); -``` - -## ArangoCursor.first - -`ArangoCursor.first() : T` - -Returns the first element or {@code null} if no element exists. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoCursor cursor = db.query("RETURN 1", Integer.class) -Integer value = cursor.first(); -assertThat(value, is(1)); -``` - -## ArangoCursor.foreach - -`ArangoCursor.foreach(Consumer action) : void` - -Performs the given action for each element of the _ArangoIterable_ - -**Arguments** - -- **action**: `Consumer` - - A action to perform on the elements - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoCursor cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class) -cursor.foreach(e -> { - // remaining results: [1, 2, 3, 4, 5] -}); -``` - -## ArangoCursor.map - -`ArangoCursor.map(Function mapper) : ArangoIterable` - -Returns a _ArangoIterable_ consisting of the results of applying the given -function to the elements of this _ArangoIterable_. 
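As an illustrative sketch (the query and names are made up, and this assumes `collectInto`, described further below for the cursor, is also available on the mapped iterable), the mapped results can be gathered into a collection:

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoCursor<Integer> cursor = db.query("FOR x IN 1..3 RETURN x", Integer.class);
// map each integer to a label and collect the mapped values
Collection<String> labels = cursor
    .map(e -> "value-" + e)
    .collectInto(new ArrayList<String>());
// labels: ["value-1", "value-2", "value-3"]
```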
-
-**Arguments**
-
-- **mapper**: `Function`
-
-  A function to apply to each element
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-cursor.map(e -> e * 10).foreach(e -> {
-  // remaining results: [10, 20, 30, 40, 50]
-});
-```
-
-## ArangoCursor.filter
-
-`ArangoCursor.filter(Predicate predicate) : ArangoIterable`
-
-Returns an _ArangoIterable_ consisting of the elements of this _ArangoIterable_
-that match the given predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate`
-
-  A predicate to apply to each element to determine if it should be included
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-cursor.filter(e -> e < 4).foreach(e -> {
-  // remaining results: [1, 2, 3]
-});
-```
-
-## ArangoCursor.anyMatch
-
-`ArangoCursor.anyMatch(Predicate predicate) : boolean`
-
-Returns whether any elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate`
-
-  A predicate to apply to elements of this _ArangoIterable_
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.anyMatch(e -> e == 3);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.allMatch
-
-`ArangoCursor.allMatch(Predicate predicate) : boolean`
-
-Returns whether all elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate`
-
-  A predicate to apply to elements of this _ArangoIterable_
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.allMatch(e -> e <= 5);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.noneMatch
-
-`ArangoCursor.noneMatch(Predicate predicate) : boolean`
-
-Returns whether no elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate`
-
-  A predicate to apply to elements of this _ArangoIterable_
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.noneMatch(e -> e > 5);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.collectInto
-
-`ArangoCursor.collectInto(R target) : R`
-
-Iterates over all elements of this _ArangoIterable_ and adds each to
-the given target.
-
-**Arguments**
-
-- **target**: `R`
-
-  The collection to insert the elements into, e.g. an `ArrayList` or `HashSet`
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Collection<Integer> list = cursor.collectInto(new ArrayList<Integer>());
-// -- or --
-Collection<Integer> set = cursor.collectInto(new HashSet<Integer>());
-```
-
-## ArangoCursor.iterator
-
-`ArangoCursor.iterator() : Iterator`
-
-Returns an iterator over elements of the query result.
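Since the cursor is itself the iterator over the query result, a common pattern is a for-each loop over it. A sketch, assuming `ArangoCursor` implements `java.lang.Iterable` (which its `iterator()` method suggests):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
// assumption: ArangoCursor is Iterable, so it can drive a for-each loop directly
for (Integer value : cursor) {
    System.out.println(value); // batches are fetched lazily as needed
}
```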
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Iterator<Integer> iterator = cursor.iterator();
-```
-
-## ArangoCursor.asListRemaining
-
-`ArangoCursor.asListRemaining() : List`
-
-Returns the remaining results as a _List_.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Collection<Integer> list = cursor.asListRemaining();
-```
-
-## ArangoCursor.getCount
-
-`ArangoCursor.getCount() : Integer`
-
-Returns the total number of result documents available (only available if the
-query was executed with the _count_ attribute set).
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", new AqlQueryOptions().count(true), Integer.class);
-Integer count = cursor.getCount();
-assertThat(count, is(5));
-```
-
-## ArangoCursor.count
-
-`ArangoCursor.count() : long`
-
-Returns the count of elements of this _ArangoIterable_.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-long count = cursor.filter(e -> e < 4).count();
-// remaining results: [1, 2, 3]
-assertThat(count, is(3L));
-```
-
-## ArangoCursor.getStats
-
-`ArangoCursor.getStats() : Stats`
-
-Returns extra information about the query result. For data-modification queries,
-the stats will contain the number of modified documents and the number of
-documents that could not be modified due to an error (if the `ignoreErrors`
-query option is specified).
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Stats stats = cursor.getStats();
-```
-
-## ArangoCursor.getWarnings
-
-`ArangoCursor.getWarnings() : Collection`
-
-Returns the warnings that the query produced, if any.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Collection warnings = cursor.getWarnings();
-```
-
-## ArangoCursor.isCached
-
-`ArangoCursor.isCached() : boolean`
-
-Indicates whether the query result was served from the query cache.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean cached = cursor.isCached();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md b/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md
deleted file mode 100644
index 060d9668a88c..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-# Managing AQL user functions
-
-These functions implement the
-[HTTP API for managing AQL user functions](../../../..//HTTP/AqlUserFunctions/index.html).
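For orientation, a small end-to-end sketch using the methods documented below (the namespace and function name are made up, and the element type of the returned collection is assumed to be `AqlFunctionEntity`):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

// register a user function, list its namespace, then remove the function again
db.createAqlFunction("MYFUNCS::DOUBLE", "function (x) { return 2 * x; }",
    new AqlFunctionCreateOptions());
Collection<AqlFunctionEntity> functions =          // element type assumed
    db.getAqlFunctions(new AqlFunctionGetOptions().namespace("MYFUNCS"));
db.deleteAqlFunction("MYFUNCS::DOUBLE", new AqlFunctionDeleteOptions());
```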
- -## ArangoDatabase.getAqlFunctions - -`ArangoDatabase.getAqlFunctions(AqlFunctionGetOptions options) : Collection` - -**Arguments** - -- **options**: `AqlFunctionGetOptions` - - - **namespace**: `String` - - Returns all registered AQL user functions from namespace namespace - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -Collection functions = db.getAqlFunctions( - new AqlFunctionGetOptions().namespace("myfuncs") -); -// functions is a list of function descriptions -``` - -## ArangoDatabase.createAqlFunction - -`ArangoDatabase.createAqlFunction(String name, String code, AqlFunctionCreateOptions options) : void` - -**Arguments** - -- **name**: `String` - - A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"` - -- **code**: `String` - - A String evaluating to a JavaScript function - -- **options**: `AqlFunctionCreateOptions` - - - **isDeterministic**: `Boolean` - - An optional boolean value to indicate that the function results are fully - deterministic (function return value solely depends on the input value - and return value is the same for repeated calls with same input) - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.createAqlFunction("ACME::ACCOUNTING::CALCULATE_VAT", - "function (price) { return 0.19; }", - new AqlFunctionCreateOptions()); -// Use the new function in an AQL query -String query = "FOR product IN products" - + "RETURN MERGE(" - + "{vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)}, product)"; -ArangoCursor cursor = db.query(query, null, new AqlQueryOptions(), Double.class); -// cursor is a cursor for the query result -``` - -## ArangoDatabase.deleteAqlFunction - -`ArangoDatabase.deleteAqlFunction(String name, AqlFunctionDeleteOptions options): Integer` - -Deletes the AQL user function with the given name from the database. - -**Arguments** - -- **name**: `String` - - The name of the user function to delete - -- **options**: `AqlFunctionDeleteOptions` - - - **group**: `Boolean` - - If set to true, then the function name provided in name is treated as a - namespace prefix, and all functions in the specified namespace will be deleted. - If set to false, the function name provided in name must be fully qualified, - including any namespaces. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.deleteAqlFunction("ACME::ACCOUNTING::CALCULATE_VAT", new AqlFunctionDeleteOptions()); -// the function no longer exists -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md deleted file mode 100644 index e014745fa208..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md +++ /dev/null @@ -1,39 +0,0 @@ - -# Accessing collections - -These functions implement the -[HTTP API for accessing collections](../../../..//HTTP/Collection/Getting.html). - -## ArangoDatabase.collection - -`ArangoDatabase.collection(String name) : ArangoCollection` - -Returns a _ArangoCollection_ instance for the given collection name. 
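Note that obtaining the instance does not create the collection on the server. A sketch combining it with `exists()` (documented under the Collection API) and a `create()` call, which is assumed here to behave as described on the Collection Manipulation page:

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoCollection collection = db.collection("myCollection");
// obtaining the handle performs no server request; the collection may not exist yet
if (!collection.exists()) {
    collection.create(); // assumption: create() as covered by Collection Manipulation
}
```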
- -**Arguments** - -- **name**: `String` - - Name of the collection - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCollection collection = db.collection("myCollection"); -``` - -## ArangoDatabase.getCollections - -`ArangoDatabase.getCollections() : Collection` - -Fetches all collections from the database and returns an list of collection descriptions. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -Collection infos = db.getCollections(); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md b/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md deleted file mode 100644 index 9cc843c22cee..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md +++ /dev/null @@ -1,95 +0,0 @@ - -# Manipulation databases - -These functions implement the -[HTTP API for manipulating databases](../../../..//HTTP/Database/index.html). - -## ArangoDB.createDatabase - -`ArangoDB.createDatabase(String name) : Boolean` - -Creates a new database with the given name. - -**Arguments** - -- **name**: `String` - - Name of the database to create - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -arango.createDatabase("myDB"); -``` - -## ArangoDatabase.create() - -`ArangoDatabase.create() : Boolean` - -Creates the database. - -Alternative for [ArangoDB.createDatabase](#arangodbcreatedatabase). - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.create(); -``` - -## ArangoDatabase.exists() - -`ArangoDatabase.exists() : boolean` - -Checks whether the database exists - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -boolean exists = db.exists(); -``` - -## ArangoDatabase.getInfo - -`ArangoDatabase.getInfo() : DatabaseEntity` - -Retrieves information about the current database - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -DatabaseEntity info = db.getInfo(); -``` - -## ArangoDB.getDatabases - -`ArangoDB.getDatabases() : Collection` - -Retrieves a list of all existing databases - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -Collection names = arango.getDatabases(); -``` - -## ArangoDatabase.drop - -`ArangoDatabase.drop() : Boolean` - -Deletes the database from the server. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.drop(); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md deleted file mode 100644 index e04eca744482..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md +++ /dev/null @@ -1,25 +0,0 @@ - -# Accessing graphs - -These functions implement the -[HTTP API for accessing general graphs](../../../..//HTTP/Gharial/index.html). - -## ArangoDatabase.graph - -`ArangoDatabase.graph(String name) : ArangoGraph` - -Returns a _ArangoGraph_ instance for the given graph name. 
- -**Arguments** - -- **name**: `String` - - Name of the graph - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("myGraph"); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md b/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md deleted file mode 100644 index 2d37ca3e7c68..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md +++ /dev/null @@ -1,29 +0,0 @@ - -# Arbitrary HTTP routes - -## ArangoDatabase.route - -`ArangoDatabase.route(String... path) : ArangoRoute` - -Returns a new _ArangoRoute_ instance for the given path -(relative to the database) that can be used to perform arbitrary requests. - -**Arguments** - -- **path**: `String...` - - The database-relative URL of the route - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoRoute myFoxxService = db.route("my-foxx-service"); - -VPackSlice body = arango.util().serialize("{'username': 'admin', 'password': 'hunter2'"); -Response response = myFoxxService.route("users").withBody(body).post(); -// response.getBody() is the result of -// POST /_db/myDB/my-foxx-service/users -// with VelocyPack request body '{"username": "admin", "password": "hunter2"}' -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/Queries.md b/Documentation/Books/Drivers/Java/Reference/Database/Queries.md deleted file mode 100644 index e6d082f7e897..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/Queries.md +++ /dev/null @@ -1,170 +0,0 @@ - -# Queries - -This function implements the -[HTTP API for single roundtrip AQL queries](../../../..//HTTP/AqlQueryCursor/QueryResults.html). - -## ArangoDatabase.query - -`ArangoDatabase.query(String query, Map bindVars, AqlQueryOptions options, Class type) : ArangoCursor` - -Performs a database query using the given _query_ and _bindVars_, then returns -a new _ArangoCursor_ instance for the result list. - -**Arguments** - -- **query**: `String` - - An AQL query string - -- **bindVars**: `Map` - - key/value pairs defining the variables to bind the query to - -- **options**: `AqlQueryOptions` - - - **count**: `Boolean` - - Indicates whether the number of documents in the result set should be - returned in the "count" attribute of the result. Calculating the "count" - attribute might have a performance impact for some queries in the future - so this option is turned off by default, and "count" is only returned - when requested. - - - **ttl**: `Integer` - - The time-to-live for the cursor (in seconds). The cursor will be removed - on the server automatically after the specified amount of time. - This is useful to ensure garbage collection of cursors that are not fully - fetched by clients. If not set, a server-defined value will be used. - - - **batchSize**: `Integer` - - Maximum number of result documents to be transferred from the server to - the client in one roundtrip. If this attribute is not set, a server-controlled - default value will be used. A batchSize value of 0 is disallowed. - - - **memoryLimit**: `Long` - - The maximum number of memory (measured in bytes) that the query is allowed - to use. If set, then the query will fail with error "resource limit exceeded" - in case it allocates too much memory. A value of 0 indicates that there is - no memory limit. 
- - - **cache**: `Boolean` - - Flag to determine whether the AQL query cache shall be used. - If set to false, then any query cache lookup will be skipped for the query. - If set to true, it will lead to the query cache being checked for the query - if the query cache mode is either on or demand. - - - **failOnWarning**: `Boolean` - - When set to true, the query will throw an exception and abort instead of - producing a warning. This option should be used during development to catch - potential issues early. When the attribute is set to false, warnings will - not be propagated to exceptions and will be returned with the query result. - There is also a server configuration option `--query.fail-on-warning` for - setting the default value for failOnWarning so it does not need to be set - on a per-query level. - - - **profile**: `Boolean` - - If set to true, then the additional query profiling information will be - returned in the sub-attribute profile of the extra return attribute if the - query result is not served from the query cache. - - - **maxTransactionSize**: `Long` - - Transaction size limit in bytes. Honored by the RocksDB storage engine only. - - - **maxWarningCount**: `Long` - - Limits the maximum number of warnings a query will return. The number of - warnings a query will return is limited to 10 by default, but that number - can be increased or decreased by setting this attribute. - - - **intermediateCommitCount**: `Long` - - Maximum number of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - - - **intermediateCommitSize**: `Long` - - Maximum total size of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - - - **satelliteSyncWait**: `Double` - - This Enterprise Edition parameter allows to configure how long a DBServer - will have time to bring the satellite collections involved in the query - into sync. The default value is 60.0 (seconds). When the max time has been - reached the query will be stopped. - - - **skipInaccessibleCollections** - - AQL queries (especially graph traversals) will treat collection to which a - user has no access rights as if these collections were empty. Instead of - returning a forbidden access error, your queries will execute normally. - This is intended to help with certain use-cases: A graph contains several - collections and different users execute AQL queries on that graph. - You can now naturally limit the accessible results by changing the - access rights of users on collections. This feature is only available in - the Enterprise Edition. - - - **fullCount**: `Boolean` - - If set to true and the query contains a LIMIT clause, then the result will - have an extra attribute with the sub-attributes stats and fullCount, - `{ ... , "extra": { "stats": { "fullCount": 123 } } }`. - The fullCount attribute will contain the number of documents in the result - before the last LIMIT in the query was applied. It can be used to count the - number of documents that match certain filter criteria, but only return a - subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. - Note that setting the option will disable a few LIMIT optimizations and may - lead to more documents being processed, and thus make queries run longer. - Note that the fullCount attribute will only be present in the result if the - query has a LIMIT clause and the LIMIT clause is actually used in the query. 
- - - **maxPlans**: `Integer` - - Limits the maximum number of plans that are created by the AQL query optimizer. - - - **rules**: `Collection` - - A list of to-be-included or to-be-excluded optimizer rules can be put into - this attribute, telling the optimizer to include or exclude specific rules. - To disable a rule, prefix its name with a `-`, to enable a rule, prefix it - with a `+`. There is also a pseudo-rule all, which will match all optimizer rules. - - - **stream**: `Boolean` - - Specify true and the query will be executed in a streaming fashion. - The query result is not stored on the server, but calculated on the fly. - Beware: long-running queries will need to hold the collection locks for as - long as the query cursor exists. When set to false a query will be executed - right away in its entirety. In that case query results are either returned - right away (if the resultset is small enough), or stored on the arangod - instance and accessible via the cursor API (with respect to the TTL). - It is advisable to only use this option on short-running queries or without - exclusive locks (write-locks on MMFiles). Please note that the query options - cache, count and fullCount will not work on streaming queries. Additionally - query statistics, warnings and profiling data will only be available after - the query is finished. The default value is false. - -- **type**: `Class` - - The type of the result (POJO class, `VPackSlice`, `String` for JSON, or `Collection`/`List`/`Map`) - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoCursor cursor = db.query( - "FOR i IN @@collection RETURN i" - new MapBuilder().put("@collection", "myCollection").get(), - new AqlQueryOptions(), - BaseDocument.class -); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/README.md b/Documentation/Books/Drivers/Java/Reference/Database/README.md deleted file mode 100644 index 9709a21cf599..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/README.md +++ /dev/null @@ -1,21 +0,0 @@ - -# Database API - -## ArangoDB.db - -`ArangoDB.db(String name) : ArangoDatabase` - -Returns a _ArangoDatabase_ instance for the given database name. - -**Arguments** - -- **name**: `String` - - Name of the database - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md b/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md deleted file mode 100644 index 1b7f9b5e3a9c..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md +++ /dev/null @@ -1,39 +0,0 @@ - -# Transactions - -This function implements the -[HTTP API for transactions](../../../..//HTTP/Transaction/index.html). - -## ArangoDatabase.transaction - -`ArangoDatabase.transaction(String action, Class type, TransactionOptions options) : T` - -Performs a server-side transaction and returns its return value. - -**Arguments** - -- **action**: `String` - - A String evaluating to a JavaScript function to be executed on the server. 
- -- **type**: `Class` - - The type of the result (POJO class, `VPackSlice` or `String` for JSON) - -- **options**: `TransactionOptions` - - Additional transaction options - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -String action = "function (params) {" - + "const db = require('@arangodb').db;" - + "return db._query('FOR i IN test RETURN i._key').toArray();" - + "}"; -String[] keys = arango.db().transaction( - action, String[].class, new TransactionOptions() -); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md deleted file mode 100644 index 85b97b768162..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md +++ /dev/null @@ -1,59 +0,0 @@ - -# Accessing views - -These functions implement the -[HTTP API for accessing view](../../../..//HTTP/Views/Getting.html). - -## ArangoDatabase.view - -`ArangoDatabase.view(String name) : ArangoView` - -Returns a _ArangoView_ instance for the given view name. - -**Arguments** - -- **name**: `String` - - Name of the view - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoView view = db.view("myView"); -``` - -## ArangoDatabase.arangoSearch - -`ArangoDatabase.arangoSearch(String name) : ArangoSearch` - -Returns a _ArangoSearch_ instance for the given ArangoSearch view name. - -**Arguments** - -- **name**: `String` - - Name of the view - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoSearch view = db.arangoSearch("myArangoSearchView"); -``` - -## ArangoDatabase.getViews - -`ArangoDatabase.getViews() : Collection` - -Fetches all views from the database and returns an list of collection descriptions. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -Collection infos = db.getViews(); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md b/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md deleted file mode 100644 index dcce8bcfed0e..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md +++ /dev/null @@ -1,127 +0,0 @@ - -# Manipulating the edge collection - -## ArangoGraph.edgeCollection - -`ArangoGraph.edgeCollection(String name) : ArangoEdgeCollection` - -Returns a _ArangoEdgeCollection_ instance for the given edge collection name. - -**Arguments** - -- **name**: `String` - - Name of the edge collection - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection"); -``` - -## ArangoGraph.getEdgeDefinitions - -`ArangoGraph.getEdgeDefinitions() : Collection` - -Fetches all edge collections from the graph and returns a list of collection names. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -Collection collections = graph.getEdgeDefinitions(); -``` - -## ArangoGraph.addEdgeDefinition - -`ArangoGraph.addEdgeDefinition(EdgeDefinition definition) : GraphEntity` - -Adds the given edge definition to the graph. 
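An edge definition can also span several vertex collections. A sketch, assuming the `EdgeDefinition` builder's `from`/`to` accept multiple collection names (varargs); the collection names are illustrative:

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoGraph graph = arango.db("myDB").graph("some-graph");

// assumption: from()/to() take varargs, so one definition can cover several vertex collections
EdgeDefinition edgeDefinition = new EdgeDefinition()
    .collection("knows")
    .from("users", "admins")
    .to("users", "admins");
graph.addEdgeDefinition(edgeDefinition);
```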
- -**Arguments** - -- **definition**: `EdgeDefinition` - - The edge definition - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -EdgeDefinition edgeDefinition = new EdgeDefinition() - .collection("edges") - .from("start-vertices") - .to("end-vertices"); -graph.addEdgeDefinition(edgeDefinition); -// the edge definition has been added to the graph -``` - -## ArangoGraph.replaceEdgeDefinition - -`ArangoGraph.replaceEdgeDefinition(EdgeDefinition definition) : GraphEntity` - -Change one specific edge definition. This will modify all occurrences of this -definition in all graphs known to your database. - -**Arguments** - -- **definition**: `EdgeDefinition` - - The edge definition - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -EdgeDefinition edgeDefinition = new EdgeDefinition() - .collection("edges") - .from("start-vertices") - .to("end-vertices"); -graph.replaceEdgeDefinition(edgeDefinition); -// the edge definition has been modified -``` - -## ArangoGraph.removeEdgeDefinition - -`ArangoGraph.removeEdgeDefinition(String definitionName) : GraphEntity` - -Remove one edge definition from the graph. This will only remove the -edge collection, the vertex collections remain untouched and can still -be used in your queries. - -**Arguments** - -- **definitionName**: `String` - - The name of the edge collection used in the definition - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -EdgeDefinition edgeDefinition = new EdgeDefinition() - .collection("edges") - .from("start-vertices") - .to("end-vertices"); -graph.addEdgeDefinition(edgeDefinition); -// the edge definition has been added to the graph - -graph.removeEdgeDefinition("edges"); -// the edge definition has been removed -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md b/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md deleted file mode 100644 index ecff0ad3b501..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md +++ /dev/null @@ -1,182 +0,0 @@ - -# Manipulating edges - -## ArangoEdgeCollection.getEdge - -`ArangoEdgeCollection.getEdge(String key, Class type, DocumentReadOptions options) : T` - -Retrieves the edge document with the given `key` from the collection. - -**Arguments** - -- **key**: `String` - - The key of the edge - -- **type**: `Class` - - The type of the edge-document (POJO class, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentReadOptions` - - - **ifNoneMatch**: `String` - - Document revision must not contain If-None-Match - - - **ifMatch**: `String` - - Document revision must contain If-Match - - - **catchException**: `Boolean` - - Whether or not catch possible thrown exceptions - -## ArangoEdgeCollection.insertEdge - -`ArangoEdgeCollection.insertEdge(T value, EdgeCreateOptions options) : EdgeEntity` - -Creates a new edge in the collection. - -**Arguments** - -- **value**: `T` - - A representation of a single edge (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `EdgeCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. 
- -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection"); - -BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key"); -document.addAttribute("some", "data"); -collection.insertEdge(document, new EdgeCreateOptions()); -``` - -## ArangoEdgeCollection.replaceEdge - -`ArangoEdgeCollection.replaceEdge(String key, T value, EdgeReplaceOptions options) : EdgeUpdateEntity` - -Replaces the edge with key with the one in the body, provided there is such -a edge and no precondition is violated. - -**Arguments** - -- **key**: `String` - - The key of the edge - -- **value**: `T` - - A representation of a single edge (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `EdgeReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Replace a document based on target revision - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection"); - -BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key"); -collection.replaceEdge("some-key", document, new EdgeReplaceOptions()); -``` - -## ArangoEdgeCollection.updateEdge - -`ArangoEdgeCollection.updateEdge(String key, T value, EdgeUpdateOptions options) : EdgeUpdateEntity` - -Updates the edge with key with the one in the body, provided there is such a -edge and no precondition is violated. - -**Arguments** - -- **key**: `String` - - The key of the edge - -- **value**: `T` - - A representation of a single edge (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `EdgeUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Update a document based on target revision - - - **keepNull**: `Boolean` - - If the intention is to delete existing attributes with the patch command, - the URL query parameter keepNull can be used with a value of false. - This will modify the behavior of the patch command to remove any attributes - from the existing document that are contained in the patch document with an - attribute value of null. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection"); - -BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key"); -collection.updateEdge("some-key", document, new EdgeUpdateOptions()); -``` - -## ArangoEdgeCollection.deleteEdge - -`ArangoEdgeCollection.deleteEdge(String key, EdgeDeleteOptions options) : void` - -Deletes the edge with the given _key_ from the collection. - -**Arguments** - -- **key**: `String` - - The key of the edge - -- **options** : `EdgeDeleteOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. 
- - - **ifMatch**: `String` - - Remove a document based on target revision - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection"); - -collection.deleteEdge("some-key", new EdgeDeleteOptions()); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/README.md b/Documentation/Books/Drivers/Java/Reference/Graph/README.md deleted file mode 100644 index 7519c0d0a779..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Graph/README.md +++ /dev/null @@ -1,190 +0,0 @@ - -# Graph API - -These functions implement the -[HTTP API for manipulating graphs](../../../..//HTTP/Gharial/index.html). - -## ArangoDatabase.createGraph - -`ArangoDatabase.createGraph(String name, Collection edgeDefinitions, GraphCreateOptions options) : GraphEntity` - -Create a new graph in the graph module. The creation of a graph requires the -name of the graph and a definition of its edges. - -**Arguments** - -- **name**: `String` - - Name of the graph - -- **edgeDefinitions**: `Collection` - - An array of definitions for the edge - -- **options**: `GraphCreateOptions` - - - **orphanCollections**: `String...` - - Additional vertex collections - - - **isSmart**: `Boolean` - - Define if the created graph should be smart. - This only has effect in Enterprise Edition. - - - **replicationFactor**: `Integer` - - (The default is 1): in a cluster, this attribute determines how many copies - of each shard are kept on different DBServers. The value 1 means that only - one copy (no synchronous replication) is kept. A value of k means that k-1 - replicas are kept. Any two copies reside on different DBServers. - Replication between them is synchronous, that is, every write operation to - the "leader" copy will be replicated to all "follower" replicas, before the - write operation is reported successful. If a server fails, this is detected - automatically and one of the servers holding copies take over, usually - without an error being reported. - - - **numberOfShards**: `Integer` - - The number of shards that is used for every collection within this graph. - Cannot be modified later. - - - **smartGraphAttribute**: `String` - - The attribute name that is used to smartly shard the vertices of a graph. - Every vertex in this Graph has to have this attribute. Cannot be modified later. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -EdgeDefinition edgeDefinition = new EdgeDefinition() - .collection("edges") - .from("start-vertices") - .to("end-vertices"); -GraphEntity graph = db.createGraph( - "some-graph", Arrays.asList(edgeDefinition), new GraphCreateOptions() -); -// graph now exists -``` - -## ArangoGraph.create - -`ArangoGraph.create(Collection edgeDefinitions, GraphCreateOptions options) : GraphEntity` - -Create a new graph in the graph module. The creation of a graph requires the -name of the graph and a definition of its edges. - -Alternative for [ArangoDatabase.createGraph](#arangodatabasecreategraph). - -**Arguments** - -- **edgeDefinitions**: `Collection` - - An array of definitions for the edge - -- **options**: `GraphCreateOptions` - - - **orphanCollections**: `String...` - - Additional vertex collections - - - **isSmart**: `Boolean` - - Define if the created graph should be smart. - This only has effect in Enterprise Edition. 
- - - **replicationFactor**: `Integer` - - (The default is 1): in a cluster, this attribute determines how many copies - of each shard are kept on different DBServers. The value 1 means that only - one copy (no synchronous replication) is kept. A value of k means that k-1 - replicas are kept. Any two copies reside on different DBServers. - Replication between them is synchronous, that is, every write operation to - the "leader" copy will be replicated to all "follower" replicas, before the - write operation is reported successful. If a server fails, this is detected - automatically and one of the servers holding copies take over, usually - without an error being reported. - - - **numberOfShards**: `Integer` - - The number of shards that is used for every collection within this graph. - Cannot be modified later. - - - **smartGraphAttribute**: `String` - - The attribute name that is used to smartly shard the vertices of a graph. - Every vertex in this Graph has to have this attribute. Cannot be modified later. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoGraph graph = db.graph("some-graph"); -EdgeDefinition edgeDefinition = new EdgeDefinition() - .collection("edges") - .from("start-vertices") - .to("end-vertices"); -graph.create(Arrays.asList(edgeDefinition), new GraphCreateOptions()); -// graph now exists -``` - -## ArangoGraph.exists - -`ArangoGraph.exists() : boolean` - -Checks whether the graph exists - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoGraph graph = db.graph("some-graph"); -boolean exists = graph.exists(); -``` - -## ArangoGraph.getInfo - -`ArangoGraph.getInfo() : GraphEntity` - -Retrieves general information about the graph. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoGraph graph = db.graph("some-graph"); -GraphEntity info = graph.getInfo(); -``` - -## ArangoGraph.drop - -`ArangoGraph.drop(boolean dropCollections) : void` - -Deletes the graph from the database. - -**Arguments** - -- **dropCollections**: `boolean` - - Drop collections of this graph as well. Collections will only be dropped if - they are not used in other graphs. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoGraph graph = db.graph("some-graph"); -graph.drop(); -// the graph "some-graph" no longer exists -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md b/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md deleted file mode 100644 index 41877172a2cf..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md +++ /dev/null @@ -1,63 +0,0 @@ - -# Manipulating the vertex collection - -## ArangoGraph.vertexCollection - -`ArangoGraph.vertexCollection(String name) : ArangoVertexCollection` - -Returns a _ArangoVertexCollection_ instance for the given vertex collection name. 
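Vertex documents are then managed through this instance, for example with `insertVertex` as documented under Manipulating vertices. A brief sketch (collection and attribute names are made up):

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");
ArangoGraph graph = db.graph("some-graph");

ArangoVertexCollection vertices = graph.vertexCollection("persons");
BaseDocument vertex = new BaseDocument();
vertex.addAttribute("name", "alice"); // illustrative attribute
vertices.insertVertex(vertex, new VertexCreateOptions());
```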
- -**Arguments** - -- **name**: `String` - - Name of the vertex collection - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection"); -``` - -## ArangoGraph.getVertexCollections - -`ArangoGraph.getVertexCollections() : Collection` - -Fetches all vertex collections from the graph and returns a list of collection names. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -Collection collections = graph.getVertexCollections(); -``` - -## ArangoGraph.addVertexCollection - -`ArangoGraph.addVertexCollection(String name) : GraphEntity` - -Adds a vertex collection to the set of collections of the graph. -If the collection does not exist, it will be created. - -**Arguments** - -- **name**: `String` - - Name of the vertex collection - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); - -graph.addVertexCollection("some-other-collection"); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md b/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md deleted file mode 100644 index 0fd634214808..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md +++ /dev/null @@ -1,182 +0,0 @@ - -# Manipulating vertices - -## ArangoVertexCollection.getVertex - -`ArangoVertexCollection.getVertex(String key, Class type, DocumentReadOptions options) : T` - -Retrieves the vertex document with the given `key` from the collection. - -**Arguments** - -- **key**: `String` - - The key of the vertex - -- **type**: `Class` - - The type of the vertex-document (POJO class, `VPackSlice` or `String` for JSON) - -- **options**: `DocumentReadOptions` - - - **ifNoneMatch**: `String` - - Document revision must not contain If-None-Match - - - **ifMatch**: `String` - - Document revision must contain If-Match - - - **catchException**: `Boolean` - - Whether or not catch possible thrown exceptions - -## ArangoVertexCollection.insertVertex - -`ArangoVertexCollection.insertVertex(T value, VertexCreateOptions options) : VertexEntity` - -Creates a new vertex in the collection. - -**Arguments** - -- **value**: `T` - - A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `VertexCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection"); - -BaseDocument document = new BaseDocument(); -document.addAttribute("some", "data"); -collection.insertVertex(document, new VertexCreateOptions()); -``` - -## ArangoVertexCollection.replaceVertex - -`ArangoVertexCollection.replaceVertex(String key, T value, VertexReplaceOptions options) : VertexUpdateEntity` - -Replaces the vertex with key with the one in the body, provided there is such -a vertex and no precondition is violated. 
- -**Arguments** - -- **key**: `String` - - The key of the vertex - -- **value**: `T` - - A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `VertexReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Replace a document based on target revision - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection"); - -BaseDocument document = new BaseDocument(); -collection.replaceVertex("some-key", document, new VertexReplaceOptions()); -``` - -## ArangoVertexCollection.updateVertex - -`ArangoVertexCollection.updateVertex(String key, T value, VertexUpdateOptions options) : VertexUpdateEntity` - -Updates the vertex with key with the one in the body, provided there is such -a vertex and no precondition is violated. - -**Arguments** - -- **key**: `String` - - The key of the vertex - -- **value**: `T` - - A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON) - -- **options**: `VertexUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Update a document based on target revision - - - **keepNull**: `Boolean` - - If the intention is to delete existing attributes with the patch command, - the URL query parameter keepNull can be used with a value of false. - This will modify the behavior of the patch command to remove any attributes - from the existing document that are contained in the patch document with - an attribute value of null. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection"); - -BaseDocument document = new BaseDocument(); -collection.updateVertex("some-key", document, new VertexUpdateOptions()); -``` - -## ArangoVertexCollection.deleteVertex - -`ArangoVertexCollection.deleteVertex(String key, VertexDeleteOptions options) : void` - -Deletes the vertex with the given _key_ from the collection. - -**Arguments** - -- **key**: `String` - - The key of the vertex - -- **options** : `VertexDeleteOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. 
- - - **ifMatch**: `String` - - Remove a document based on target revision - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoGraph graph = db.graph("some-graph"); -ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection"); - -collection.deleteVertex("some-key", new VertexDeleteOptions()); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/README.md b/Documentation/Books/Drivers/Java/Reference/README.md deleted file mode 100644 index c986f37b1b5e..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/README.md +++ /dev/null @@ -1,29 +0,0 @@ - -# ArangoDB Java Driver - Reference - -- [Driver Setup](Setup.md) -- [Database](Database/README.md) - - [Database Manipulation](Database/DatabaseManipulation.md) - - [Collection Access](Database/CollectionAccess.md) - - [View Access](Database/ViewAccess.md) - - [Queries](Database/Queries.md) - - [AQL User Functions](Database/AqlUserFunctions.md) - - [Transactions](Database/Transactions.md) - - [Graph Access](Database/GraphAccess.md) - - [HTTP Routes](Database/HttpRoutes.md) -- [Collection](Collection/README.md) - - [Collection Manipulation](Collection/CollectionManipulation.md) - - [Document Manipulation](Collection/DocumentManipulation.md) - - [Indexes](Collection/Indexes.md) - - [Bulk Import](Collection/BulkImport.md) -- [View](View/README.md) - - [View Manipulation](View/ViewManipulation.md) - - [ArangoSearch Views](View/ArangoSearch.md) -- [Cursor](Cursor.md) -- [Graph](Graph/README.md) - - [Vertex Collection](Graph/VertexCollection.md) - - [Edge Collection](Graph/EdgeCollection.md) - - [Vertices Manipulation](Graph/Vertices.md) - - [Edges Manipulation](Graph/Edges.md) -- [Route](Route.md) -- [Serialization](Serialization.md) diff --git a/Documentation/Books/Drivers/Java/Reference/Route.md b/Documentation/Books/Drivers/Java/Reference/Route.md deleted file mode 100644 index ad5659950604..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/Route.md +++ /dev/null @@ -1,276 +0,0 @@ - -# Route API - -_ArangoRoute_ instances provide access for arbitrary HTTP requests. -This allows easy access to Foxx services and other HTTP APIs not covered -by the driver itself. - -## ArangoRoute.route - -`ArangoRoute.route(String... path) : ArangoRoute` - -Returns a new _ArangoRoute_ instance for the given path (relative to the -current route) that can be used to perform arbitrary requests. - -**Arguments** - -- **path**: `String...` - - The relative URL of the route - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); - -ArangoRoute route = db.route("my-foxx-service"); -ArangoRoute users = route.route("users"); -// equivalent to db.route("my-foxx-service/users") -// or db.route("my-foxx-service", "users") -``` - -## ArangoRoute.withHeader - -`ArangoRoute.withHeader(String key, Object value) : ArangoRoute` - -Header that should be sent with each request to the route. - -**Arguments** - -- **key**: `String` - - Header key - -- **value**: `Object` - - Header value (the _toString()_ method will be called for the value} - -## ArangoRoute.withQueryParam - -`ArangoRoute.withQueryParam(String key, Object value) : ArangoRoute` - -Query parameter that should be sent with each request to the route. 
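For illustration, headers and query parameters can be combined on a route before a request is performed. The following is only a sketch; the header name and the values used are placeholders:

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

// every request performed on this route carries the header and the query parameter
ArangoRoute route = db.route("my-foxx-service", "users")
    .withHeader("x-session-id", "abc123")
    .withQueryParam("group", "admin");
Response response = route.get();
// response.getBody() is the response body of calling
// GET _db/_system/my-foxx-service/users?group=admin (with the x-session-id header set)
```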
**Arguments**

- **key**: `String`

  Query parameter key

- **value**: `Object`

  Query parameter value (the _toString()_ method will be called for the value)

## ArangoRoute.withBody

`ArangoRoute.withBody(Object body) : ArangoRoute`

The request body. The body will be serialized to _VPackSlice_.

**Arguments**

- **body**: `Object`

  The request body

## ArangoRoute.delete

`ArangoRoute.delete() : Response`

Performs a DELETE request to the given URL and returns the server response.

**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.delete();
// response.getBody() is the response body of calling
// DELETE _db/_system/my-foxx-service

// -- or --

Response response = route.route("users/admin").delete();
// response.getBody() is the response body of calling
// DELETE _db/_system/my-foxx-service/users/admin

// -- or --

Response response = route.route("users/admin").withQueryParam("permanent", true).delete();
// response.getBody() is the response body of calling
// DELETE _db/_system/my-foxx-service/users/admin?permanent=true
```

## ArangoRoute.get

`ArangoRoute.get() : Response`

Performs a GET request to the given URL and returns the server response.

**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.get();
// response.getBody() is the response body of calling
// GET _db/_system/my-foxx-service

// -- or --

Response response = route.route("users").get();
// response.getBody() is the response body of calling
// GET _db/_system/my-foxx-service/users

// -- or --

Response response = route.route("users").withQueryParam("group", "admin").get();
// response.getBody() is the response body of calling
// GET _db/_system/my-foxx-service/users?group=admin
```

## ArangoRoute.head

`ArangoRoute.head() : Response`

Performs a HEAD request to the given URL and returns the server response.

**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.head();
// response is the response object for
// HEAD _db/_system/my-foxx-service
```

## ArangoRoute.patch

`ArangoRoute.patch() : Response`

Performs a PATCH request to the given URL and returns the server response.
**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.patch();
// response.getBody() is the response body of calling
// PATCH _db/_system/my-foxx-service

// -- or --

Response response = route.route("users/admin").patch();
// response.getBody() is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin

// -- or --

VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
Response response = route.route("users/admin").withBody(body).patch();
// response.getBody() is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin
// with JSON request body {"password": "hunter2"}

// -- or --

VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
Response response = route.route("users/admin")
    .withBody(body).withQueryParam("admin", true).patch();
// response.getBody() is the response body of calling
// PATCH _db/_system/my-foxx-service/users/admin?admin=true
// with JSON request body {"password": "hunter2"}
```

## ArangoRoute.post

`ArangoRoute.post() : Response`

Performs a POST request to the given URL and returns the server response.

**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.post();
// response.getBody() is the response body of calling
// POST _db/_system/my-foxx-service

// -- or --

Response response = route.route("users").post();
// response.getBody() is the response body of calling
// POST _db/_system/my-foxx-service/users

// -- or --

VPackSlice body = arango.util().serialize("{ username: 'admin', password: 'hunter2' }");
Response response = route.route("users").withBody(body).post();
// response.getBody() is the response body of calling
// POST _db/_system/my-foxx-service/users
// with JSON request body {"username": "admin", "password": "hunter2"}

// -- or --

VPackSlice body = arango.util().serialize("{ username: 'admin', password: 'hunter2' }");
Response response = route.route("users")
    .withBody(body).withQueryParam("admin", true).post();
// response.getBody() is the response body of calling
// POST _db/_system/my-foxx-service/users?admin=true
// with JSON request body {"username": "admin", "password": "hunter2"}
```

## ArangoRoute.put

`ArangoRoute.put() : Response`

Performs a PUT request to the given URL and returns the server response.
**Examples**

```Java
ArangoDB arango = new ArangoDB.Builder().build();
ArangoDatabase db = arango.db("myDB");

ArangoRoute route = db.route("my-foxx-service");
Response response = route.put();
// response.getBody() is the response body of calling
// PUT _db/_system/my-foxx-service

// -- or --

Response response = route.route("users/admin").put();
// response.getBody() is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin

// -- or --

VPackSlice body = arango.util().serialize("{ username: 'admin', password: 'hunter2' }");
Response response = route.route("users/admin").withBody(body).put();
// response.getBody() is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin
// with JSON request body {"username": "admin", "password": "hunter2"}

// -- or --

VPackSlice body = arango.util().serialize("{ username: 'admin', password: 'hunter2' }");
Response response = route.route("users/admin")
    .withBody(body).withQueryParam("admin", true).put();
// response.getBody() is the response body of calling
// PUT _db/_system/my-foxx-service/users/admin?admin=true
// with JSON request body {"username": "admin", "password": "hunter2"}
```
diff --git a/Documentation/Books/Drivers/Java/Reference/Serialization.md b/Documentation/Books/Drivers/Java/Reference/Serialization.md
deleted file mode 100644
index 76988b907561..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Serialization.md
+++ /dev/null
@@ -1,253 +0,0 @@

# Serialization

## VelocyPack serialization

Since version `4.1.11` you can extend the VelocyPack serialization by
registering additional `VPackModule`s on `ArangoDB.Builder`.

### Java 8 types

GitHub: https://github.com/arangodb/java-velocypack-module-jdk8

Added support for:

- `java.time.Instant`
- `java.time.LocalDate`
- `java.time.LocalDateTime`
- `java.time.ZonedDateTime`
- `java.time.OffsetDateTime`
- `java.time.ZoneId`
- `java.util.Optional`
- `java.util.OptionalDouble`
- `java.util.OptionalInt`
- `java.util.OptionalLong`

```XML
<dependencies>
  <dependency>
    <groupId>com.arangodb</groupId>
    <artifactId>velocypack-module-jdk8</artifactId>
    <version>1.1.0</version>
  </dependency>
</dependencies>
```

```Java
ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJdk8Module()).build();
```

### Scala types

GitHub: https://github.com/arangodb/java-velocypack-module-scala

Added support for:

- `scala.Option`
- `scala.collection.immutable.List`
- `scala.collection.immutable.Map`
- `scala.math.BigInt`
- `scala.math.BigDecimal`

```XML
<dependencies>
  <dependency>
    <groupId>com.arangodb</groupId>
    <artifactId>velocypack-module-scala</artifactId>
    <version>1.0.2</version>
  </dependency>
</dependencies>
```

```Scala
val arangoDB: ArangoDB = new ArangoDB.Builder().registerModule(new VPackScalaModule).build
```

### Joda-Time

GitHub: https://github.com/arangodb/java-velocypack-module-joda

Added support for:

- `org.joda.time.DateTime`
- `org.joda.time.Instant`
- `org.joda.time.LocalDate`
- `org.joda.time.LocalDateTime`

```XML
<dependencies>
  <dependency>
    <groupId>com.arangodb</groupId>
    <artifactId>velocypack-module-joda</artifactId>
    <version>1.1.1</version>
  </dependency>
</dependencies>
```

```Java
ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJodaModule()).build();
```

## Use of Jackson as an alternative serializer

Since version 4.5.2, the driver supports alternative serializers to de-/serialize
documents, edges and query results. One implementation is
[VelocyJack](https://github.com/arangodb/jackson-dataformat-velocypack#within-arangodb-java-driver),
which is based on [Jackson](https://github.com/FasterXML/jackson) working with
[jackson-dataformat-velocypack](https://github.com/arangodb/jackson-dataformat-velocypack).
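As a minimal sketch of how such an alternative serializer might be plugged in, assuming the `VelocyJack` implementation from jackson-dataformat-velocypack and the driver builder's `serializer()` method:

```Java
// VelocyJack is provided by the jackson-dataformat-velocypack artifact linked above
VelocyJack velocyJack = new VelocyJack();
ArangoDB arangoDB = new ArangoDB.Builder()
    .serializer(velocyJack) // use Jackson-based (de-)serialization instead of the default
    .build();
```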
**Note**: Any registered custom [serializer/deserializer or module](#custom-serialization)
will be ignored.

## Custom serialization

```Java
ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() {
  @Override
  public <C extends VPackSetupContext<C>> void setup(final C context) {
    context.registerDeserializer(MyObject.class, new VPackDeserializer<MyObject>() {
      @Override
      public MyObject deserialize(VPackSlice parent, VPackSlice vpack,
          VPackDeserializationContext context) throws VPackException {
        MyObject obj = new MyObject();
        obj.setName(vpack.get("name").getAsString());
        return obj;
      }
    });
    context.registerSerializer(MyObject.class, new VPackSerializer<MyObject>() {
      @Override
      public void serialize(VPackBuilder builder, String attribute, MyObject value,
          VPackSerializationContext context) throws VPackException {
        builder.add(attribute, ValueType.OBJECT);
        builder.add("name", value.getName());
        builder.close();
      }
    });
  }
}).build();
```

## JavaBeans

The driver can serialize/deserialize JavaBeans. They need at least a
constructor without parameters.

```Java
public class MyObject {

  private String name;
  private Gender gender;
  private int age;

  public MyObject() {
    super();
  }

}
```

## Internal fields

To use Arango-internal fields (like \_id, \_key, \_rev, \_from, \_to) in your
JavaBeans, use the annotation `DocumentField`.

```Java
public class MyObject {

  @DocumentField(Type.KEY)
  private String key;

  private String name;
  private Gender gender;
  private int age;

  public MyObject() {
    super();
  }

}
```

## Serialized fieldnames

To use a different serialized name for a field, use the annotation `SerializedName`.

```Java
public class MyObject {

  @SerializedName("title")
  private String name;

  private Gender gender;
  private int age;

  public MyObject() {
    super();
  }

}
```

## Ignore fields

To ignore fields at serialization/deserialization, use the annotation `Expose`.

```Java
public class MyObject {

  @Expose
  private String name;
  @Expose(serialize = true, deserialize = false)
  private Gender gender;
  private int age;

  public MyObject() {
    super();
  }

}
```

## Custom serializer

```Java
ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() {
  @Override
  public <C extends VPackSetupContext<C>> void setup(final C context) {
    context.registerDeserializer(MyObject.class, new VPackDeserializer<MyObject>() {
      @Override
      public MyObject deserialize(VPackSlice parent, VPackSlice vpack,
          VPackDeserializationContext context) throws VPackException {
        MyObject obj = new MyObject();
        obj.setName(vpack.get("name").getAsString());
        return obj;
      }
    });
    context.registerSerializer(MyObject.class, new VPackSerializer<MyObject>() {
      @Override
      public void serialize(VPackBuilder builder, String attribute, MyObject value,
          VPackSerializationContext context) throws VPackException {
        builder.add(attribute, ValueType.OBJECT);
        builder.add("name", value.getName());
        builder.close();
      }
    });
  }
}).build();
```

## Manual serialization

To de-/serialize from and to VelocyPack before or after a database call, use the
`ArangoUtil` from the method `util()` in `ArangoDB`, `ArangoDatabase`,
`ArangoCollection`, `ArangoGraph`, `ArangoEdgeCollection` or `ArangoVertexCollection`.
```Java
ArangoDB arangoDB = new ArangoDB.Builder().build();
VPackSlice vpack = arangoDB.util().serialize(myObj);
```

```Java
ArangoDB arangoDB = new ArangoDB.Builder().build();
MyObject myObj = arangoDB.util().deserialize(vpack, MyObject.class);
```
diff --git a/Documentation/Books/Drivers/Java/Reference/Setup.md b/Documentation/Books/Drivers/Java/Reference/Setup.md
deleted file mode 100644
index b1eebc79a4fa..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Setup.md
+++ /dev/null
@@ -1,198 +0,0 @@

# Driver setup

Setup with the default configuration; this automatically loads a properties file
`arangodb.properties` if it exists in the classpath:

```Java
// this instance is thread-safe
ArangoDB arangoDB = new ArangoDB.Builder().build();
```

The driver is configured with some default values:

| property-key             | description                              | default value  |
| ------------------------ | ---------------------------------------- | -------------- |
| arangodb.hosts           | ArangoDB hosts                           | 127.0.0.1:8529 |
| arangodb.timeout         | connect & request timeout (milliseconds) | 0              |
| arangodb.user            | Basic Authentication User                |                |
| arangodb.password        | Basic Authentication Password            |                |
| arangodb.useSsl          | use SSL connection                       | false          |
| arangodb.chunksize       | VelocyStream Chunk content-size (bytes)  | 30000          |
| arangodb.connections.max | max number of connections                | 1 VST, 20 HTTP |
| arangodb.protocol        | used network protocol                    | VST            |

To customize the configuration, the parameters can be changed in the code...

```Java
ArangoDB arangoDB = new ArangoDB.Builder()
  .host("192.168.182.50", 8888)
  .build();
```

... or with a custom properties file (my.properties):

```Java
InputStream in = MyClass.class.getResourceAsStream("my.properties");
ArangoDB arangoDB = new ArangoDB.Builder()
  .loadProperties(in)
  .build();
```

Example for arangodb.properties:

```
arangodb.hosts=127.0.0.1:8529,127.0.0.1:8529
arangodb.user=root
arangodb.password=
```

## Network protocol

The driver's default network protocol is the binary protocol VelocyStream,
which offers the best performance within the driver. To use HTTP, you have to
set the configuration `useProtocol` to `Protocol.HTTP_JSON` for HTTP with JSON
content or `Protocol.HTTP_VPACK` for HTTP with
[VelocyPack](https://github.com/arangodb/velocypack/blob/master/VelocyPack.md) content.

```Java
ArangoDB arangoDB = new ArangoDB.Builder()
  .useProtocol(Protocol.VST)
  .build();
```

In addition to setting the configuration for HTTP, you have to add the
Apache HttpClient to your classpath.

```XML
<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpclient</artifactId>
  <version>4.5.1</version>
</dependency>
```

**Note**: If you are using ArangoDB 3.0.x you have to set the protocol to
`Protocol.HTTP_JSON` because it is the only one supported.

## SSL

To use SSL, you have to set the configuration `useSsl` to `true` and set an `SSLContext`
(see [example code](https://github.com/arangodb/arangodb-java-driver/blob/master/src/test/java/com/arangodb/example/ssl/SslExample.java)).

```Java
ArangoDB arangoDB = new ArangoDB.Builder()
  .useSsl(true)
  .sslContext(sc)
  .build();
```

## Connection Pooling

The driver supports connection pooling for VelocyStream with a default of 1 and
HTTP with a default of 20 maximum connections per host. To change this value,
use the method `maxConnections(Integer)` in `ArangoDB.Builder`.

```Java
ArangoDB arangoDB = new ArangoDB.Builder()
  .maxConnections(8)
  .build();
```

The driver does not explicitly release connections.
To avoid exhaustion of -resources when no connection is needed, you can clear the connection pool -(close all connections to the server) or use [connection TTL](#connection-time-to-live). - -```Java -arangoDB.shutdown(); -``` - -## Fallback hosts - -The driver supports configuring multiple hosts. The first host is used to open a -connection to. When this host is not reachable the next host from the list is used. -To use this feature just call the method `host(String, int)` multiple times. - -```Java -ArangoDB arangoDB = new ArangoDB.Builder() - .host("host1", 8529) - .host("host2", 8529) - .build(); -``` - -Since version 4.3 the driver support acquiring a list of known hosts in a -cluster setup or a single server setup with followers. For this the driver has -to be able to successfully open a connection to at least one host to get the -list of hosts. Then it can use this list when fallback is needed. To use this -feature just pass `true` to the method `acquireHostList(boolean)`. - -```Java -ArangoDB arangoDB = new ArangoDB.Builder() - .acquireHostList(true) - .build(); -``` - -## Load Balancing - -Since version 4.3 the driver supports load balancing for cluster setups in -two different ways. - -The first one is a round robin load balancing where the driver iterates -through a list of known hosts and performs every request on a different -host than the request before. - -```Java -ArangoDB arangoDB = new ArangoDB.Builder() - .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN) - .build(); -``` - -Just like the Fallback hosts feature the round robin load balancing strategy -can use the `acquireHostList` configuration to acquire a list of all known hosts -in the cluster. Do so only requires the manually configuration of only one host. -Because this list is updated frequently it makes load balancing over the whole -cluster very comfortable. - -```Java -ArangoDB arangoDB = new ArangoDB.Builder() - .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN) - .acquireHostList(true) - .build(); -``` - -The second load balancing strategy allows to pick a random host from the -configured or acquired list of hosts and sticks to that host as long as the -connection is open. This strategy is useful for an application - using the driver - -which provides a session management where each session has its own instance of -`ArangoDB` build from a global configured list of hosts. In this case it could -be wanted that every sessions sticks with all its requests to the same host but -not all sessions should use the same host. This load balancing strategy also -works together with `acquireHostList`. - -```Java -ArangoDB arangoDB = new ArangoDB.Builder() - .loadBalancingStrategy(LoadBalancingStrategy.ONE_RANDOM) - .acquireHostList(true) - .build(); -``` - -## Connection time to live - -Since version 4.4 the driver supports setting a TTL (time to life) in milliseconds -for connections managed by the internal connection pool. - -```Java -ArangoDB arango = new ArangoDB.Builder() - .connectionTtl(5 * 60 * 1000) - .build(); -``` - -In this example all connections will be closed/reopened after 5 minutes. - -Connection TTL can be disabled setting it to `null`: - -```Java -.connectionTtl(null) -``` - -The default TTL is `null` (no automatic connection closure). 
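Putting several of the options above together, a cluster-oriented configuration might look like the following sketch (host names and the concrete values are placeholders):

```Java
ArangoDB arangoDB = new ArangoDB.Builder()
    .host("cluster-node1", 8529)
    .host("cluster-node2", 8529)                               // fallback host
    .acquireHostList(true)                                     // discover the remaining hosts
    .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN)  // spread requests over all hosts
    .maxConnections(8)                                         // connections per host
    .connectionTtl(5 * 60 * 1000)                              // recycle connections after 5 minutes
    .build();
```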
diff --git a/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md b/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md deleted file mode 100644 index 0b70d91acfbf..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md +++ /dev/null @@ -1,274 +0,0 @@ - -# ArangoSearch API - -These functions implement the -[HTTP API for ArangoSearch views](../../../..//HTTP/Views/ArangoSearch.html). - -## ArangoDatabase.createArangoSearch - -`ArangoDatabase.createArangoSearch(String name, ArangoSearchCreateOptions options) : ViewEntity` - -Creates a ArangoSearch view with the given _options_, then returns -view information from the server. - -**Arguments** - -- **name**: `String` - - The name of the view - -- **options**: `ArangoSearchCreateOptions` - - - **consolidationIntervalMsec**: `Long` - - Wait at least this many milliseconds between committing index data changes - and making them visible to queries (default: 60000, to disable use: 0). - For the case where there are a lot of inserts/updates, a lower value, - until commit, will cause the index not to account for them and memory usage - would continue to grow. For the case where there are a few inserts/updates, - a higher value will impact performance and waste disk space for each - commit call without any added benefits. - - - **cleanupIntervalStep**: `Long` - - Wait at least this many commits between removing unused files in - data directory (default: 10, to disable use: 0). For the case where the - consolidation policies merge segments often (i.e. a lot of commit+consolidate), - a lower value will cause a lot of disk space to be wasted. For the case - where the consolidation policies rarely merge segments (i.e. few inserts/deletes), - a higher value will impact performance without any added benefits. - - - **consolidationPolicy**: - - - **type**: `ConsolidationType` - - The type of the consolidation policy. - - - **threshold**: `Double` - - Select a given segment for "consolidation" if and only if the formula - based on type (as defined above) evaluates to true, valid value range - [0.0, 1.0] (default: 0.85) - - - **segmentThreshold**: `Long` - - Apply the "consolidation" operation if and only if (default: 300): - `{segmentThreshold} < number_of_segments` - - - **link**: `CollectionLink[]` - - A list of linked collections - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.createArangoSearch("potatoes", new ArangoSearchPropertiesOptions()); -// the ArangoSearch view "potatoes" now exists -``` - -## ArangoSearch.create - -`ArangoSearch.create(ArangoSearchCreateOptions options) : ViewEntity` - -Creates a ArangoSearch view with the given _options_, then returns view information from the server. - -Alternative for `ArangoDatabase.createArangoSearch`. - -**Arguments** - -- **options**: `ArangoSearchCreateOptions` - - - **consolidationIntervalMsec**: `Long` - - Wait at least this many milliseconds between committing index data changes - and making them visible to queries (default: 60000, to disable use: 0). - For the case where there are a lot of inserts/updates, a lower value, - until commit, will cause the index not to account for them and memory usage - would continue to grow. For the case where there are a few inserts/updates, - a higher value will impact performance and waste disk space for each - commit call without any added benefits. 
- - - **cleanupIntervalStep**: `Long` - - Wait at least this many commits between removing unused files in - data directory (default: 10, to disable use: 0). For the case where the - consolidation policies merge segments often (i.e. a lot of commit+consolidate), - a lower value will cause a lot of disk space to be wasted. For the case - where the consolidation policies rarely merge segments (i.e. few inserts/deletes), - a higher value will impact performance without any added benefits. - - - **consolidationPolicy**: - - - **type**: `ConsolidationType` - - The type of the consolidation policy. - - - **threshold**: `Double` - - Select a given segment for "consolidation" if and only if the formula - based on type (as defined above) evaluates to true, valid value range - [0.0, 1.0] (default: 0.85) - - - **segmentThreshold**: `Long` - - Apply the "consolidation" operation if and only if (default: 300): - `{segmentThreshold} < number_of_segments` - - - **link**: `CollectionLink[]` - - A list of linked collections - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoSearch view = db.arangoSearch("potatoes"); - -view.create(new ArangoSearchPropertiesOptions()); -// the ArangoSearch view "potatoes" now exists -``` - -## ArangoSearch.getProperties - -`ArangoSearch.getProperties() : ArangoSearchPropertiesEntity` - -Reads the properties of the specified view. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoSearch view = db.arangoSearch("potatoes"); - -ArangoSearchPropertiesEntity properties = view.getProperties(); -``` - -## ArangoSearch.updateProperties - -`ArangoSearch.updateProperties(ArangoSearchPropertiesOptions options) : ArangoSearchPropertiesEntity` - -Partially changes properties of the view. - -**Arguments** - -- **options**: `ArangoSearchPropertiesOptions` - - - **consolidationIntervalMsec**: `Long` - - Wait at least this many milliseconds between committing index data changes - and making them visible to queries (default: 60000, to disable use: 0). - For the case where there are a lot of inserts/updates, a lower value, - until commit, will cause the index not to account for them and memory usage - would continue to grow. For the case where there are a few inserts/updates, - a higher value will impact performance and waste disk space for each - commit call without any added benefits. - - - **cleanupIntervalStep**: `Long` - - Wait at least this many commits between removing unused files in - data directory (default: 10, to disable use: 0). For the case where the - consolidation policies merge segments often (i.e. a lot of commit+consolidate), - a lower value will cause a lot of disk space to be wasted. For the case - where the consolidation policies rarely merge segments (i.e. few inserts/deletes), - a higher value will impact performance without any added benefits. - - - **consolidationPolicy**: - - - **type**: `ConsolidationType` - - The type of the consolidation policy. 
- - - **threshold**: `Double` - - Select a given segment for "consolidation" if and only if the formula - based on type (as defined above) evaluates to true, valid value range - [0.0, 1.0] (default: 0.85) - - - **segmentThreshold**: `Long` - - Apply the "consolidation" operation if and only if (default: 300): - `{segmentThreshold} < number_of_segments` - - - **link**: `CollectionLink[]` - - A list of linked collections - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoSearch view = db.arangoSearch("some-view"); - -view.updateProperties( - new ArangoSearchPropertiesOptions() - .link(CollectionLink.on("myCollection").fields(FieldLink.on("value").analyzers("identity"))) -); -``` - -## ArangoSearch.replaceProperties - -`ArangoSearch.replaceProperties(ArangoSearchPropertiesOptions options) : ArangoSearchPropertiesEntity` - -Changes properties of the view. - -**Arguments** - -- **options**: `ArangoSearchPropertiesOptions` - - - **consolidationIntervalMsec**: `Long` - - Wait at least this many milliseconds between committing index data changes - and making them visible to queries (default: 60000, to disable use: 0). - For the case where there are a lot of inserts/updates, a lower value, - until commit, will cause the index not to account for them and memory usage - would continue to grow. For the case where there are a few inserts/updates, - a higher value will impact performance and waste disk space for each - commit call without any added benefits. - - - **cleanupIntervalStep**: `Long` - - Wait at least this many commits between removing unused files in - data directory (default: 10, to disable use: 0). For the case where the - consolidation policies merge segments often (i.e. a lot of commit+consolidate), - a lower value will cause a lot of disk space to be wasted. For the case - where the consolidation policies rarely merge segments (i.e. few inserts/deletes), - a higher value will impact performance without any added benefits. - - - **consolidationPolicy**: - - - **type**: `ConsolidationType` - - The type of the consolidation policy. - - - **threshold**: `Double` - - Select a given segment for "consolidation" if and only if the formula - based on type (as defined above) evaluates to true, valid value range - [0.0, 1.0] (default: 0.85) - - - **segmentThreshold**: `Long` - - Apply the "consolidation" operation if and only if (default: 300): - `{segmentThreshold} < number_of_segments` - - - **link**: `CollectionLink[]` - - A list of linked collections - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoSearch view = db.arangoSearch("some-view"); - -view.replaceProperties( - new ArangoSearchPropertiesOptions() - .link(CollectionLink.on("myCollection").fields(FieldLink.on("value").analyzers("identity"))) -); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/View/README.md b/Documentation/Books/Drivers/Java/Reference/View/README.md deleted file mode 100644 index 0091cf8148e3..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/View/README.md +++ /dev/null @@ -1,43 +0,0 @@ - -# View API - -These functions implement the -[HTTP API for views](../../../..//HTTP/Views/index.html). - -## Getting information about the view - -See -[the HTTP API documentation](../../../..//HTTP/Views/Getting.html) -for details. 
- -## ArangoView.exists - -`ArangoView.exists() : boolean` - -Checks whether the view exists - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoView view = db.view("potatoes"); - -boolean exists = view.exists(); -``` - -## ArangoView.getInfo - -`ArangoView.getInfo() : ViewEntity` - -Returns information about the view. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoView view = db.view("potatoes"); - -ViewEntity info = view.getInfo(); -``` diff --git a/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md b/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md deleted file mode 100644 index c85aa1289b01..000000000000 --- a/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md +++ /dev/null @@ -1,71 +0,0 @@ - -# Manipulating the view - -These functions implement -[the HTTP API for modifying views](../../../..//HTTP/Views/Modifying.html). - -## ArangoDatabase.createView - -`ArangoDatabase.createView(String name, ViewType type) : ViewEntity` - -Creates a view of the given _type_, then returns view information from the server. - -**Arguments** - -- **name**: `String` - - The name of the view - -- **type**: `ViewType` - - The type of the view - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -db.createView("myView", ViewType.ARANGO_SEARCH); -// the view "potatoes" now exists -``` - -## ArangoView.rename - -`ArangoView.rename(String newName) : ViewEntity` - -Renames the view. - -**Arguments** - -- **newName**: `String` - - The new name - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoView view = db.view("some-view"); - -ViewEntity result = view.rename("new-view-name") -assertThat(result.getName(), is("new-view-name"); -// result contains additional information about the view -``` - -## ArangoView.drop - -`ArangoView.drop() : void` - -Deletes the view from the database. - -**Examples** - -```Java -ArangoDB arango = new ArangoDB.Builder().build(); -ArangoDatabase db = arango.db("myDB"); -ArangoView view = db.view("some-view"); - -view.drop(); -// the view "some-view" no longer exists -``` diff --git a/Documentation/Books/Drivers/PHP/GettingStarted/README.md b/Documentation/Books/Drivers/PHP/GettingStarted/README.md deleted file mode 100644 index 5c00b2a7ad1a..000000000000 --- a/Documentation/Books/Drivers/PHP/GettingStarted/README.md +++ /dev/null @@ -1,78 +0,0 @@ - -# ArangoDB-PHP - Getting Started -## Description - -This PHP client allows REST-based access to documents on the server. -The *DocumentHandler* class should be used for these purposes. -There is an example for REST-based documents access in the file examples/document.php. - -Furthermore, the PHP client also allows to issue more AQL complex queries using the *Statement* class. -There is an example for this kind of statements in the file examples/select.php. - -To use the PHP client, you must include the file autoloader.php from the main directory. -The autoloader will care about loading additionally required classes on the fly. The autoloader can be nested with other autoloaders. - -The ArangoDB PHP client is an API that allows you to send and retrieve documents from ArangoDB from out of your PHP application. 
The client library itself is written in PHP and has no further dependencies but just plain PHP 5.6 (or higher). - -The client library provides document and collection classes you can use to work with documents and collections in an OO fashion. When exchanging document data with the server, the library internally will use the [HTTP REST interface of ArangoDB](../../../HTTP/index.html). The library user does not have to care about this fact as all the details of the REST interface are abstracted by the client library. - -## Requirements - -* PHP version 5.6 or higher (Travis-tested with PHP 5.6, 7.0, 7.1 and hhvm) - -Note on PHP version support: - -This driver will cease to support old PHP versions as soon as they have reached end-of-life status. Support will be removed with the next minor or patch version of the driver to be released. - -In general, it is recommended to always use the latest PHP versions (currently those in the PHP 7 line) in order to take advantage of all the improvements (especially in performance). - -### Important version information on ArangoDB-PHP - -The ArangoDB-PHP driver version has to match with the ArangoDB version: - -- ArangoDB-PHP 3.1.x is on par with the functionality of ArangoDB 3.1.x -- ArangoDB-PHP 3.2.x is on par with the functionality of ArangoDB 3.2.x -- ArangoDB-PHP 3.3.x is on par with the functionality of ArangoDB 3.3.x - -etc... - - -### Installing the PHP client - -To get started you need PHP 5.6 or higher plus an ArangoDB server running on any host that you can access. - -There are two alternative ways to get the ArangoDB PHP client: - - * Using Composer - * Cloning the git repository - -#### Alternative 1: Using Composer - -``` -composer require triagens/arangodb -``` -#### Alternative 2: Cloning the git repository - -When preferring this alternative, you need to have a git client installed. To clone the ArangoDB PHP client repository from github, execute the following command in your project directory: - - git clone "https://github.com/arangodb/arangodb-php.git" - - -This will create a subdirectory arangodb-php in your current directory. It contains all the files of the client library. It also includes a dedicated autoloader that you can use for autoloading the client libraries class files. -To invoke this autoloader, add the following line to your PHP files that will use the library: - -```php -require 'arangodb-php/autoload.php'; -``` - - -The ArangoDB PHP client's autoloader will only care about its own class files and will not handle any other files. That means it is fully nestable with other autoloaders. - -#### Alternative 3: Invoking the autoloader directly - -If you do not wish to include autoload.php to load and setup the autoloader, you can invoke the autoloader directly: - -```php -require 'arangodb-php/lib/ArangoDBClient/autoloader.php'; -\ArangoDBClient\Autoloader::init(); -``` diff --git a/Documentation/Books/Drivers/PHP/README.md b/Documentation/Books/Drivers/PHP/README.md deleted file mode 100644 index 967e1a58e7be..000000000000 --- a/Documentation/Books/Drivers/PHP/README.md +++ /dev/null @@ -1,18 +0,0 @@ - -# ArangoDB-PHP - A PHP client for ArangoDB -The official ArangoDB PHP Driver. 
- -- [Getting Started](GettingStarted/README.md) -- [Tutorial](Tutorial/README.md) -- [Changelog](https://github.com/arangodb/arangodb-php/blob/devel/CHANGELOG.md#readme) - -# More information - -* Check the ArangoDB PHP client on github.com regularly for new releases and updates: [https://github.com/arangodb/arangodb-php](https://github.com/arangodb/arangodb-php) - -* More example code, containing some code to create, delete and rename collections, is provided in the [examples](https://github.com/arangodb/arangodb-php/tree/devel/examples) subdirectory that is provided with the library. - -* [PHPDoc documentation](http://arangodb.github.io/arangodb-php/) for the complete library - -* [Follow us on Twitter](https://twitter.com/arangodbphp) - [@arangodbphp](https://twitter.com/arangodbphp) to receive updates on the PHP driver diff --git a/Documentation/Books/Drivers/PHP/Tutorial/README.md b/Documentation/Books/Drivers/PHP/Tutorial/README.md deleted file mode 100644 index c69bfb5a1839..000000000000 --- a/Documentation/Books/Drivers/PHP/Tutorial/README.md +++ /dev/null @@ -1,889 +0,0 @@ - -# ArangoDB-PHP - Tutorial -## Setting up the connection options - -In order to use ArangoDB, you need to specify the connection options. We do so by creating a PHP array $connectionOptions. Put this code into a file named test.php in your current directory: - -```php -// use the following line when using Composer -// require __DIR__ . '/vendor/composer/autoload.php'; - -// use the following line when using git -require __DIR__ . '/arangodb-php/autoload.php'; - -// set up some aliases for less typing later -use ArangoDBClient\Collection as ArangoCollection; -use ArangoDBClient\CollectionHandler as ArangoCollectionHandler; -use ArangoDBClient\Connection as ArangoConnection; -use ArangoDBClient\ConnectionOptions as ArangoConnectionOptions; -use ArangoDBClient\DocumentHandler as ArangoDocumentHandler; -use ArangoDBClient\Document as ArangoDocument; -use ArangoDBClient\Exception as ArangoException; -use ArangoDBClient\Export as ArangoExport; -use ArangoDBClient\ConnectException as ArangoConnectException; -use ArangoDBClient\ClientException as ArangoClientException; -use ArangoDBClient\ServerException as ArangoServerException; -use ArangoDBClient\Statement as ArangoStatement; -use ArangoDBClient\UpdatePolicy as ArangoUpdatePolicy; - -// set up some basic connection options -$connectionOptions = [ - // database name - ArangoConnectionOptions::OPTION_DATABASE => '_system', - // server endpoint to connect to - ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529', - // authorization type to use (currently supported: 'Basic') - ArangoConnectionOptions::OPTION_AUTH_TYPE => 'Basic', - // user for basic authorization - ArangoConnectionOptions::OPTION_AUTH_USER => 'root', - // password for basic authorization - ArangoConnectionOptions::OPTION_AUTH_PASSWD => '', - // connection persistence on server. 
can use either 'Close' (one-time connections) or 'Keep-Alive' (re-used connections) - ArangoConnectionOptions::OPTION_CONNECTION => 'Keep-Alive', - // connect timeout in seconds - ArangoConnectionOptions::OPTION_TIMEOUT => 3, - // whether or not to reconnect when a keep-alive connection has timed out on server - ArangoConnectionOptions::OPTION_RECONNECT => true, - // optionally create new collections when inserting documents - ArangoConnectionOptions::OPTION_CREATE => true, - // optionally create new collections when inserting documents - ArangoConnectionOptions::OPTION_UPDATE_POLICY => ArangoUpdatePolicy::LAST, -]; - - -// turn on exception logging (logs to whatever PHP is configured) -ArangoException::enableLogging(); - - - $connection = new ArangoConnection($connectionOptions); - -``` - -This will make the client connect to ArangoDB - -* running on localhost (OPTION_HOST) -* on the default port 8529 (OPTION_PORT) -* with a connection timeout of 3 seconds (OPTION_TIMEOUT) - -When creating new documents in a collection that does not yet exist, you have the following choices: - -* auto-generate a new collection: if you prefer that, set OPTION_CREATE to true -* fail with an error: if you prefer this behavior, set OPTION_CREATE to false - -When updating a document that was previously/concurrently updated by another user, you can select between the following behaviors: - -* last update wins: if you prefer this, set OPTION_UPDATE_POLICY to last -* fail with a conflict error: if you prefer that, set OPTION_UPDATE_POLICY to conflict - - -## Setting up active failover - -By default the PHP client will connect to a single endpoint only, -by specifying a string value for the endpoint in the `ConnectionOptions`, -e.g. - -```php -$connectionOptions = [ - ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529' -]; -``` - -To set up multiple servers to connect to, it is also possible to specify -an array of servers instead: - -```php -$connectionOptions = [ - ConnectionOptions::OPTION_ENDPOINT => [ 'tcp://localhost:8531', 'tcp://localhost:8532', 'tcp://localhost:8530' ] -]; -``` -Using this option requires ArangoDB 3.3 or higher and the database running -in active failover mode. - -The driver will by default try to connect to the first server endpoint in the -endpoints array, and only try the following servers if no connection can be -established. If no connection can be made to any server, the driver will throw -an exception. - -As it is unknown to the driver which server from the array is the current -leader, the driver will connect to the specified servers in array order by -default. However, to spare a few unnecessary connection attempts to failed -servers, it is possible to set up caching (using Memcached) for the server list. -The cached value will contain the last working server first, so that as few -connection attempts as possible will need to be made. 
- -In order to use this caching, it is required to install the Memcached module -for PHP, and to set up the following relevant options in the `ConnectionOptions`: - -```php -$connectionOptions = [ - // memcached persistent id (will be passed to Memcached::__construct) - ConnectionOptions::OPTION_MEMCACHED_PERSISTENT_ID => 'arangodb-php-pool', - - // memcached servers to connect to (will be passed to Memcached::addServers) - ConnectionOptions::OPTION_MEMCACHED_SERVERS => [ [ '127.0.0.1', 11211 ] ], - - // memcached options (will be passed to Memcached::setOptions) - ConnectionOptions::OPTION_MEMCACHED_OPTIONS => [ ], - - // key to store the current endpoints array under - ConnectionOptions::OPTION_MEMCACHED_ENDPOINTS_KEY => 'arangodb-php-endpoints' - - // time-to-live for the endpoints array stored in memcached - ConnectionOptions::OPTION_MEMCACHED_TTL => 600 -]; -``` - - -## Creating a collection -*This is just to show how a collection is created.* -*For these examples it is not needed to create a collection prior to inserting a document, as we set ArangoConnectionOptions::OPTION_CREATE to true.* - -So, after we get the settings, we can start with creating a collection. We will create a collection named "users". - -The below code will first set up the collection locally in a variable name $user, and then push it to the server and return the collection id created by the server: - -```php - $collectionHandler = new ArangoCollectionHandler($connection); - - // clean up first - if ($collectionHandler->has('users')) { - $collectionHandler->drop('users'); - } - if ($collectionHandler->has('example')) { - $collectionHandler->drop('example'); - } - - // create a new collection - $userCollection = new ArangoCollection(); - $userCollection->setName('users'); - $id = $collectionHandler->create($userCollection); - - // print the collection id created by the server - var_dump($id); - // check if the collection exists - $result = $collectionHandler->has('users'); - var_dump($result); - - ``` -## Creating a document - -After we created the collection, we can start with creating an initial document. We will create a user document in a collection named "users". This collection does not need to exist yet. The first document we'll insert in this collection will create the collection on the fly. This is because we have set OPTION_CREATE to true in $connectionOptions. - -The below code will first set up the document locally in a variable name $user, and then push it to the server and return the document id created by the server: - -```php - $handler = new ArangoDocumentHandler($connection); - - // create a new document - $user = new ArangoDocument(); - - // use set method to set document properties - $user->set('name', 'John'); - $user->set('age', 25); - $user->set('thisIsNull', null); - - // use magic methods to set document properties - $user->likes = ['fishing', 'hiking', 'swimming']; - - // send the document to the server - $id = $handler->save('users', $user); - - // check if a document exists - $result = $handler->has('users', $id); - var_dump($result); - - // print the document id created by the server - var_dump($id); - var_dump($user->getId()); -``` - -Document properties can be set by using the set() method, or by directly manipulating the document properties. - -As you can see, sending a document to the server is achieved by calling the save() method on the client library's *DocumentHandler* class. It needs the collection name ("users" in this case") plus the document object to be saved. 
save() will return the document id as created by the server. The id is a numeric value that might or might not fit in a PHP integer. - -## Adding exception handling - - -The above code will work but it does not check for any errors. To make it work in the face of errors, we'll wrap it into some basic exception handlers - -```php -try { - $handler = new ArangoDocumentHandler($connection); - - // create a new document - $user = new ArangoDocument(); - - // use set method to set document properties - $user->set('name', 'John'); - $user->set('age', 25); - - // use magic methods to set document properties - $user->likes = ['fishing', 'hiking', 'swimming']; - - // send the document to the server - $id = $handler->save('users', $user); - - // check if a document exists - $result = $handler->has('users', $id); - var_dump($result); - - // print the document id created by the server - var_dump($id); - var_dump($user->getId()); -} catch (ArangoConnectException $e) { - print 'Connection error: ' . $e->getMessage() . PHP_EOL; -} catch (ArangoClientException $e) { - print 'Client error: ' . $e->getMessage() . PHP_EOL; -} catch (ArangoServerException $e) { - print 'Server error: ' . $e->getServerCode() . ':' . $e->getServerMessage() . ' ' . $e->getMessage() . PHP_EOL; -} -``` - -## Retrieving a document - -To retrieve a document from the server, the get() method of the *DocumentHandler* class can be used. It needs the collection name plus a document id. There is also the getById() method which is an alias for get(). - -```php - // get the document back from the server - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - -/* -The result of the get() method is a Document object that you can use in an OO fashion: - -object(ArangoDBClient\Document)##6 (4) { - ["_id":"ArangoDBClient\Document":private]=> - string(15) "2377907/4818344" - ["_rev":"ArangoDBClient\Document":private]=> - int(4818344) - ["_values":"ArangoDBClient\Document":private]=> - array(3) { - ["age"]=> - int(25) - ["name"]=> - string(4) "John" - ["likes"]=> - array(3) { - [0]=> - string(7) "fishing" - [1]=> - string(6) "hiking" - [2]=> - string(8) "swimming" - } - } - ["_changed":"ArangoDBClient\Document":private]=> - bool(false) -} -*/ -``` - -Whenever the document id is yet unknown, but you want to fetch a document from the server by any of its other properties, you can use the CollectionHandler->byExample() method. It allows you to provide an example of the document that you are looking for. The example should either be a Document object with the relevant properties set, or, a PHP array with the propeties that you are looking for: - -```php - // get a document list back from the server, using a document example - $cursor = $collectionHandler->byExample('users', ['name' => 'John']); - var_dump($cursor->getAll()); - -``` - -This will return all documents from the specified collection (here: "users") with the properties provided in the example (here: that have an attribute "name" with a value of "John"). The result is a cursor which can be iterated sequentially or completely. We have chosen to get the complete result set above by calling the cursor's getAll() method. -Note that CollectionHandler->byExample() might return multiple documents if the example is ambigious. - -## Updating a document - - -To update an existing document, the update() method of the *DocumentHandler* class can be used. -In this example we want to -- set state to 'ca' -- change the `likes` array. 
- -```php - // update a document - $userFromServer->likes = ['fishing', 'swimming']; - $userFromServer->state = 'CA'; - - $result = $handler->update($userFromServer); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - -``` - -To remove an attribute using the update() method, an option has to be passed telling it to not keep attributes with null values. -In this example we want to -- remove the `age` - -```php - // update a document removing an attribute, - // The 'keepNull'=>false option will cause ArangoDB to - // remove all attributes in the document, - // that have null as their value - not only the ones defined here - - $userFromServer->likes = ['fishing', 'swimming']; - $userFromServer->state = 'CA'; - $userFromServer->age = null; - - $result = $handler->update($userFromServer, ['keepNull' => false]); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); -``` - -To completely replace an existing document, the replace() method of the *DocumentHandler* class can be used. -In this example we want to remove the `state` attribute. - -```php - // replace a document (notice that we are using the previously fetched document) - // In this example we are removing the state attribute - unset($userFromServer->state); - - $result = $handler->replace($userFromServer); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); -``` - -The document that is replaced using the previous example must have been fetched from the server before. If you want to update a document without having fetched it from the server before, use updateById(): - -```php - // replace a document, identified by collection and document id - $user = new ArangoDocument(); - $user->name = 'John'; - $user->likes = ['Running', 'Rowing']; - $userFromServer->state = 'CA'; - - // Notice that for the example we're getting the existing - // document id via a method call. Normally we would use the known id - $result = $handler->replaceById('users', $userFromServer->getId(), $user); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - -``` - -## Deleting a document - -To remove an existing document on the server, the remove() method of the *DocumentHandler* class will do. remove() just needs the document to be removed as a parameter: - -```php - // remove a document on the server, using a document object - $result = $handler->remove($userFromServer); - var_dump($result); -``` - -Note that the document must have been fetched from the server before. If you haven't fetched the document from the server before, use the removeById() method. This requires just the collection name (here: "users") and the document id. - -```php - // remove a document on the server, using a collection id and document id - // In this example, we are using the id of the document we deleted in the previous example, - // so it will throw an exception here. (we are catching it though, in order to continue) - - try { - $result = $handler->removeById('users', $userFromServer->getId()); - } catch (\ArangoDBClient\ServerException $e) { - $e->getMessage(); - } -``` - - -## Running an AQL query - - -To run an AQL query, use the *Statement* class. - -The method Statement::execute creates a Cursor object which can be used to iterate over -the query's result set. 
- -```php - // create a statement to insert 1000 test users - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR i IN 1..1000 INSERT { _key: CONCAT("test", i) } IN users' - ] - ); - - // execute the statement - $cursor = $statement->execute(); - - - // now run another query on the data, using bind parameters - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR u IN @@collection FILTER u.name == @name RETURN u', - 'bindVars' => [ - '@collection' => 'users', - 'name' => 'John' - ] - ] - ); - - // executing the statement returns a cursor - $cursor = $statement->execute(); - - // easiest way to get all results returned by the cursor - var_dump($cursor->getAll()); - - // to get statistics for the query, use Cursor::getExtra(); - var_dump($cursor->getExtra()); - -``` - -Note: by default the Statement object will create a Cursor that converts each value into -a Document object. This is normally the intended behavior for AQL queries that return -entire documents. However, an AQL query can also return projections or any other data -that cannot be converted into Document objects. - -In order to suppress the conversion into Document objects, the Statement must be given -the `_flat` attribute. This allows processing the results of arbitrary AQL queries: - - -```php - // run an AQL query that does not return documents but scalars - // we need to set the _flat attribute of the Statement in order for this to work - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR i IN 1..1000 RETURN i', - '_flat' => true - ] - ); - - // executing the statement returns a cursor - $cursor = $statement->execute(); - - // easiest way to get all results returned by the cursor - // note that now the results won't be converted into Document objects - var_dump($cursor->getAll()); - -``` - - -## Exporting data - - -To export the contents of a collection to PHP, use the *Export* class. -The *Export* class will create a light-weight cursor over all documents -of the specified collection. The results can be transferred to PHP -in chunks incrementally. This is the most efficient way of iterating -over all documents in a collection. - - -```php - // creates an export object for collection users - $export = new ArangoExport($connection, 'users', []); - - // execute the export. this will return a special, forward-only cursor - $cursor = $export->execute(); - - // now we can fetch the documents from the collection in blocks - while ($docs = $cursor->getNextBatch()) { - // do something with $docs - var_dump($docs); - } - - // the export can also be restricted to just a few attributes per document: - $export = new ArangoExport( - $connection, 'users', [ - '_flat' => true, - 'restrict' => [ - 'type' => 'include', - 'fields' => ['_key', 'likes'] - ] - ] - ); - - // now fetch just the configured attributes for each document - while ($docs = $cursor->getNextBatch()) { - // do something with $docs - var_dump($docs); - } -``` - -## Bulk document handling - - -The ArangoDB-PHP driver provides a mechanism to easily fetch multiple documents from -the same collection with a single request. 
All that needs to be provided is an array -of document keys: - - -```php - $exampleCollection = new ArangoCollection(); - $exampleCollection->setName('example'); - $id = $collectionHandler->create($exampleCollection); - - // create a statement to insert 100 example documents - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR i IN 1..100 INSERT { _key: CONCAT("example", i), value: i } IN example' - ] - ); - $statement->execute(); - - // later on, we can assemble a list of document keys - $keys = []; - for ($i = 1; $i <= 100; ++$i) { - $keys[] = 'example' . $i; - } - // and fetch all the documents at once - $documents = $collectionHandler->lookupByKeys('example', $keys); - var_dump($documents); - - // we can also bulk-remove them: - $result = $collectionHandler->removeByKeys('example', $keys); - - var_dump($result); - - -``` -## Dropping a collection - - -To drop an existing collection on the server, use the drop() method of the *CollectionHandler* class. -drop() just needs the name of the collection to be dropped: - -```php - // drop a collection on the server, using its name - $result = $collectionHandler->drop('users'); - var_dump($result); - - // drop the other one we created, too - $collectionHandler->drop('example'); -``` - -# Custom Document class - -If you want to use a custom document class, you can pass its name to the DocumentHandler or CollectionHandler using the `setDocumentClass` method. -Remember that your class must extend `\ArangoDBClient\Document`. - -```php -$ch = new CollectionHandler($connection); -$ch->setDocumentClass('\AppBundle\Entity\Product'); -$cursor = $ch->all('product'); -// All returned documents will be \AppBundle\Entity\Product instances - - -$dh = new DocumentHandler($connection); -$dh->setDocumentClass('\AppBundle\Entity\Product'); -$product = $dh->get('products', 11231234); -// Product will be a \AppBundle\Entity\Product instance -``` - -See the file examples/customDocumentClass.php for more details. - -## Logging exceptions - - -The driver provides a simple logging mechanism that is turned off by default. If it is turned on, the driver -will log all its exceptions using PHP's standard `error_log()` mechanism. Whether and where exceptions are -logged depends on the PHP configuration. Please consult -your php.ini settings for further details. - -To turn on exception logging in the driver, set a flag on the driver's Exception base class, from which all -driver exceptions are subclassed: - -```php -use ArangoDBClient\Exception as ArangoException; - -ArangoException::enableLogging(); -``` - -To turn logging off, call its `disableLogging` method: - -```php -use ArangoDBClient\Exception as ArangoException; - -ArangoException::disableLogging(); -``` - -## Putting it all together - -Here's the full code that combines all the pieces outlined above: - -```php -// use the following line when using Composer -// require __DIR__ . '/vendor/composer/autoload.php'; - -// use the following line when using git -require __DIR__ . 
'/autoload.php'; - -// set up some aliases for less typing later -use ArangoDBClient\Collection as ArangoCollection; -use ArangoDBClient\CollectionHandler as ArangoCollectionHandler; -use ArangoDBClient\Connection as ArangoConnection; -use ArangoDBClient\ConnectionOptions as ArangoConnectionOptions; -use ArangoDBClient\DocumentHandler as ArangoDocumentHandler; -use ArangoDBClient\Document as ArangoDocument; -use ArangoDBClient\Exception as ArangoException; -use ArangoDBClient\Export as ArangoExport; -use ArangoDBClient\ConnectException as ArangoConnectException; -use ArangoDBClient\ClientException as ArangoClientException; -use ArangoDBClient\ServerException as ArangoServerException; -use ArangoDBClient\Statement as ArangoStatement; -use ArangoDBClient\UpdatePolicy as ArangoUpdatePolicy; - -// set up some basic connection options -$connectionOptions = [ - // database name - ArangoConnectionOptions::OPTION_DATABASE => '_system', - // server endpoint to connect to - ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529', - // authorization type to use (currently supported: 'Basic') - ArangoConnectionOptions::OPTION_AUTH_TYPE => 'Basic', - // user for basic authorization - ArangoConnectionOptions::OPTION_AUTH_USER => 'root', - // password for basic authorization - ArangoConnectionOptions::OPTION_AUTH_PASSWD => '', - // connection persistence on server. can use either 'Close' (one-time connections) or 'Keep-Alive' (re-used connections) - ArangoConnectionOptions::OPTION_CONNECTION => 'Keep-Alive', - // connect timeout in seconds - ArangoConnectionOptions::OPTION_TIMEOUT => 3, - // whether or not to reconnect when a keep-alive connection has timed out on server - ArangoConnectionOptions::OPTION_RECONNECT => true, - // optionally create new collections when inserting documents - ArangoConnectionOptions::OPTION_CREATE => true, - // optionally create new collections when inserting documents - ArangoConnectionOptions::OPTION_UPDATE_POLICY => ArangoUpdatePolicy::LAST, -]; - - -// turn on exception logging (logs to whatever PHP is configured) -ArangoException::enableLogging(); - -try { - $connection = new ArangoConnection($connectionOptions); - - $collectionHandler = new ArangoCollectionHandler($connection); - - // clean up first - if ($collectionHandler->has('users')) { - $collectionHandler->drop('users'); - } - if ($collectionHandler->has('example')) { - $collectionHandler->drop('example'); - } - - // create a new collection - $userCollection = new ArangoCollection(); - $userCollection->setName('users'); - $id = $collectionHandler->create($userCollection); - - // print the collection id created by the server - var_dump($id); - - // check if the collection exists - $result = $collectionHandler->has('users'); - var_dump($result); - - $handler = new ArangoDocumentHandler($connection); - - // create a new document - $user = new ArangoDocument(); - - // use set method to set document properties - $user->set('name', 'John'); - $user->set('age', 25); - $user->set('thisIsNull', null); - - // use magic methods to set document properties - $user->likes = ['fishing', 'hiking', 'swimming']; - - // send the document to the server - $id = $handler->save('users', $user); - - // check if a document exists - $result = $handler->has('users', $id); - var_dump($result); - - // print the document id created by the server - var_dump($id); - var_dump($user->getId()); - - - // get the document back from the server - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - - // get a 
document list back from the server, using a document example - $cursor = $collectionHandler->byExample('users', ['name' => 'John']); - var_dump($cursor->getAll()); - - - // update a document - $userFromServer->likes = ['fishing', 'swimming']; - $userFromServer->state = 'CA'; - - $result = $handler->update($userFromServer); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - - - // update a document removing an attribute, - // The 'keepNull'=>false option will cause ArangoDB to - // remove all attributes in the document, - // that have null as their value - not only the ones defined here - - $userFromServer->likes = ['fishing', 'swimming']; - $userFromServer->state = 'CA'; - $userFromServer->age = null; - - $result = $handler->update($userFromServer, ['keepNull' => false]); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - - - // replace a document (notice that we are using the previously fetched document) - // In this example we are removing the state attribute - unset($userFromServer->state); - - $result = $handler->replace($userFromServer); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - - - // replace a document, identified by collection and document id - $user = new ArangoDocument(); - $user->name = 'John'; - $user->likes = ['Running', 'Rowing']; - $userFromServer->state = 'CA'; - - // Notice that for the example we're getting the existing - // document id via a method call. Normally we would use the known id - $result = $handler->replaceById('users', $userFromServer->getId(), $user); - var_dump($result); - - $userFromServer = $handler->get('users', $id); - var_dump($userFromServer); - - - // remove a document on the server - $result = $handler->remove($userFromServer); - var_dump($result); - - - // remove a document on the server, using a collection id and document id - // In this example, we are using the id of the document we deleted in the previous example, - // so it will throw an exception here. (we are catching it though, in order to continue) - - try { - $result = $handler->removeById('users', $userFromServer->getId()); - } catch (\ArangoDBClient\ServerException $e) { - $e->getMessage(); - } - - - - // create a statement to insert 1000 test users - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR i IN 1..1000 INSERT { _key: CONCAT("test", i) } IN users' - ] - ); - - // execute the statement - $cursor = $statement->execute(); - - - // now run another query on the data, using bind parameters - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR u IN @@collection FILTER u.name == @name RETURN u', - 'bindVars' => [ - '@collection' => 'users', - 'name' => 'John' - ] - ] - ); - - // executing the statement returns a cursor - $cursor = $statement->execute(); - - // easiest way to get all results returned by the cursor - var_dump($cursor->getAll()); - - // to get statistics for the query, use Cursor::getExtra(); - var_dump($cursor->getExtra()); - - - // creates an export object for collection users - $export = new ArangoExport($connection, 'users', []); - - // execute the export. 
this will return a special, forward-only cursor - $cursor = $export->execute(); - - // now we can fetch the documents from the collection in blocks - while ($docs = $cursor->getNextBatch()) { - // do something with $docs - var_dump($docs); - } - - // the export can also be restricted to just a few attributes per document: - $export = new ArangoExport( - $connection, 'users', [ - '_flat' => true, - 'restrict' => [ - 'type' => 'include', - 'fields' => ['_key', 'likes'] - ] - ] - ); - - // now fetch just the configured attributes for each document - while ($docs = $cursor->getNextBatch()) { - // do something with $docs - var_dump($docs); - } - - - $exampleCollection = new ArangoCollection(); - $exampleCollection->setName('example'); - $id = $collectionHandler->create($exampleCollection); - - // create a statement to insert 100 example documents - $statement = new ArangoStatement( - $connection, [ - 'query' => 'FOR i IN 1..100 INSERT { _key: CONCAT("example", i), value: i } IN example' - ] - ); - $statement->execute(); - - // later on, we can assemble a list of document keys - $keys = []; - for ($i = 1; $i <= 100; ++$i) { - $keys[] = 'example' . $i; - } - // and fetch all the documents at once - $documents = $collectionHandler->lookupByKeys('example', $keys); - var_dump($documents); - - // we can also bulk-remove them: - $result = $collectionHandler->removeByKeys('example', $keys); - - var_dump($result); - - - // drop a collection on the server, using its name, - $result = $collectionHandler->drop('users'); - var_dump($result); - - // drop the other one we created, too - $collectionHandler->drop('example'); -} catch (ArangoConnectException $e) { - print 'Connection error: ' . $e->getMessage() . PHP_EOL; -} catch (ArangoClientException $e) { - print 'Client error: ' . $e->getMessage() . PHP_EOL; -} catch (ArangoServerException $e) { - print 'Server error: ' . $e->getServerCode() . ': ' . $e->getServerMessage() . ' - ' . $e->getMessage() . 
PHP_EOL; -} - -``` diff --git a/Documentation/Books/Drivers/README.md b/Documentation/Books/Drivers/README.md deleted file mode 100644 index 8542c6d3902f..000000000000 --- a/Documentation/Books/Drivers/README.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -page-toc: - disable: true ---- -ArangoDB VERSION_NUMBER Drivers Documentation -============================================= - -Official drivers ----------------- - -Name | Language | Repository |   ------|----------|------------|------- -[ArangoDB-Java-Driver](Java/README.md) | Java | https://github.com/arangodb/arangodb-java-driver | [Changelog](https://github.com/arangodb/arangodb-java-driver/blob/master/ChangeLog.md#readme) -ArangoDB-Java-Driver-Async | Java | https://github.com/arangodb/arangodb-java-driver-async | [Changelog](https://github.com/arangodb/arangodb-java-driver-async/blob/master/ChangeLog.md#readme) -[ArangoJS](JS/README.md) | JavaScript | https://github.com/arangodb/arangojs | [Changelog](https://github.com/arangodb/arangojs/blob/master/CHANGELOG.md#readme) -[ArangoDB-PHP](PHP/README.md) | PHP | https://github.com/arangodb/arangodb-php | [Changelog](https://github.com/arangodb/arangodb-php/blob/devel/CHANGELOG.md#readme) -[Go-Driver](GO/README.md) | Go | https://github.com/arangodb/go-driver | [Changelog](https://github.com/arangodb/go-driver/blob/master/CHANGELOG.md#readme) - -Integrations ------------- - -Name | Language | Repository |   ------|----------|------------|------- -[Spring Data](SpringData/README.md) | Java | https://github.com/arangodb/spring-data | [Changelog](https://github.com/arangodb/spring-data/blob/master/ChangeLog.md#readme) -[ArangoDB-Spark-Connector](SparkConnector/README.md) | Scala, Java | https://github.com/arangodb/arangodb-spark-connector | [Changelog](https://github.com/arangodb/arangodb-spark-connector/blob/master/ChangeLog.md#readme) - -Community drivers ------------------ - -Please note that this list is not exhaustive. 
- -Name | Language | Repository ------|----------|----------- -ArangoDB-PHP-Core | PHP | https://github.com/frankmayer/ArangoDB-PHP-Core -ArangoDB-NET | .NET | https://github.com/yojimbo87/ArangoDB-NET -aranGO | Go | https://github.com/diegogub/aranGO -arangolite | Go | https://github.com/solher/arangolite -aranGoDriver | Go | https://github.com/TobiEiss/aranGoDriver -pyArango | Python | http://www.github.com/tariqdaouda/pyArango -python-arango | Python | https://github.com/Joowani/python-arango -Scarango | Scala | https://github.com/outr/scarango -ArangoRB | Ruby | https://github.com/StefanoMartin/ArangoRB diff --git a/Documentation/Books/Drivers/SUMMARY.md b/Documentation/Books/Drivers/SUMMARY.md deleted file mode 100644 index 61b4e2a78659..000000000000 --- a/Documentation/Books/Drivers/SUMMARY.md +++ /dev/null @@ -1,116 +0,0 @@ - -# Summary - -* [Introduction](README.md) - -## Official Drivers - - -* [Java Driver](Java/README.md) - * [Getting Started](Java/GettingStarted/README.md) - * [Reference](Java/Reference/README.md) - * [Driver Setup](Java/Reference/Setup.md) - * [Database](Java/Reference/Database/README.md) - * [Database Manipulation](Java/Reference/Database/DatabaseManipulation.md) - * [Collection Access](Java/Reference/Database/CollectionAccess.md) - * [View Access](Java/Reference/Database/ViewAccess.md) - * [Queries](Java/Reference/Database/Queries.md) - * [AQL User Functions](Java/Reference/Database/AqlUserFunctions.md) - * [Transactions](Java/Reference/Database/Transactions.md) - * [Graph Access](Java/Reference/Database/GraphAccess.md) - * [HTTP Routes](Java/Reference/Database/HttpRoutes.md) - * [Collection](Java/Reference/Collection/README.md) - * [Collection Manipulation](Java/Reference/Collection/CollectionManipulation.md) - * [Document Manipulation](Java/Reference/Collection/DocumentManipulation.md) - * [Indexes](Java/Reference/Collection/Indexes.md) - * [Bulk Import](Java/Reference/Collection/BulkImport.md) - * [View](Java/Reference/View/README.md) - * [View Manipulation](Java/Reference/View/ViewManipulation.md) - * [ArangoSearch Views](Java/Reference/View/ArangoSearch.md) - * [Cursor](Java/Reference/Cursor.md) - * [Graph](Java/Reference/Graph/README.md) - * [Vertex Collection](Java/Reference/Graph/VertexCollection.md) - * [Edge Collection](Java/Reference/Graph/EdgeCollection.md) - * [Vertices Manipulation](Java/Reference/Graph/Vertices.md) - * [Edges Manipulation](Java/Reference/Graph/Edges.md) - * [Route](Java/Reference/Route.md) - * [Serialization](Java/Reference/Serialization.md) - -* [ArangoJS - JavaScript Driver](JS/README.md) - * [Getting Started](JS/GettingStarted/README.md) - * [Reference](JS/Reference/README.md) - * [Database](JS/Reference/Database/README.md) - * [Database Manipulation](JS/Reference/Database/DatabaseManipulation.md) - * [Collection Access](JS/Reference/Database/CollectionAccess.md) - * [View Access](JS/Reference/Database/ViewAccess.md) - * [Queries](JS/Reference/Database/Queries.md) - * [AQL User Functions](JS/Reference/Database/AqlUserFunctions.md) - * [Transactions](JS/Reference/Database/Transactions.md) - * [Graph Access](JS/Reference/Database/GraphAccess.md) - * [Foxx Services](JS/Reference/Database/FoxxServices.md) - * [HTTP Routes](JS/Reference/Database/HttpRoutes.md) - * [Collection](JS/Reference/Collection/README.md) - * [Collection Manipulation](JS/Reference/Collection/CollectionManipulation.md) - * [Document Manipulation](JS/Reference/Collection/DocumentManipulation.md) - * 
[DocumentCollection](JS/Reference/Collection/DocumentCollection.md) - * [EdgeCollection](JS/Reference/Collection/EdgeCollection.md) - * [Indexes](JS/Reference/Collection/Indexes.md) - * [Simple Queries](JS/Reference/Collection/SimpleQueries.md) - * [Bulk Import](JS/Reference/Collection/BulkImport.md) - * [AQL Helpers](JS/Reference/Aql.md) - * [View Manipulation](JS/Reference/ViewManipulation.md) - * [Cursor](JS/Reference/Cursor.md) - * [Graph](JS/Reference/Graph/README.md) - * [Vertices](JS/Reference/Graph/Vertices.md) - * [Edges](JS/Reference/Graph/Edges.md) - * [VertexCollection](JS/Reference/Graph/VertexCollection.md) - * [EdgeCollection](JS/Reference/Graph/EdgeCollection.md) - * [Route](JS/Reference/Route.md) - -* [ArangoDB-PHP](PHP/README.md) - * [Getting Started](PHP/GettingStarted/README.md) - * [Tutorial](PHP/Tutorial/README.md) - -* [ArangoDB Go Driver](GO/README.md) - * [Getting Started](GO/GettingStarted/README.md) - * [Example Requests](GO/ExampleRequests/README.md) - * [Connection Management](GO/ConnectionManagement/README.md) - -## Integrations - - -* [Spring Data ArangoDB](SpringData/README.md) - * [Getting Started](SpringData/GettingStarted/README.md) - * [Reference](SpringData/Reference/README.md) - * [Template](SpringData/Reference/Template/README.md) - * [Queries](SpringData/Reference/Template/Queries.md) - * [Document Manipulation](SpringData/Reference/Template/DocumentManipulation.md) - * [Multiple Document Manipulation](SpringData/Reference/Template/MultiDocumentManipulation.md) - * [Collection Manipulation](SpringData/Reference/Template/CollectionManipulation.md) - * [Repositories](SpringData/Reference/Repositories/README.md) - * [Queries](SpringData/Reference/Repositories/Queries/README.md) - * [Derived queries](SpringData/Reference/Repositories/Queries/DerivedQueries.md) - * [Query methods](SpringData/Reference/Repositories/Queries/QueryMethods.md) - * [Named queries](SpringData/Reference/Repositories/Queries/NamedQueries.md) - * [Document Manipulation](SpringData/Reference/Repositories/DocumentManipulation.md) - * [Multiple Document Manipulation](SpringData/Reference/Repositories/MultiDocumentManipulation.md) - * [Query by example](SpringData/Reference/Repositories/QueryByExample.md) - * [Mapping](SpringData/Reference/Mapping/README.md) - * [Document](SpringData/Reference/Mapping/Document.md) - * [Edge](SpringData/Reference/Mapping/Edge.md) - * [Reference](SpringData/Reference/Mapping/Reference.md) - * [Relations](SpringData/Reference/Mapping/Relations.md) - * [Indexes](SpringData/Reference/Mapping/Indexes.md) - * [Converter](SpringData/Reference/Mapping/Converter.md) - * [Events](SpringData/Reference/Mapping/Events.md) - * [Auditing](SpringData/Reference/Mapping/Auditing.md) - * [Migration](SpringData/Migration/README.md) - * [Migrating 1.x to 3.0](SpringData/Migration/Migrating-1.x-3.0.md) - * [Migrating 2.x to 3.0](SpringData/Migration/Migrating-2.x-3.0.md) - -* [ArangoDB Spark Connector](SparkConnector/README.md) - * [Getting Started](SparkConnector/GettingStarted/README.md) - * [Reference](SparkConnector/Reference/README.md) - * [Java](SparkConnector/Reference/Java.md) - * [Scala](SparkConnector/Reference/Scala.md) - diff --git a/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md b/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md deleted file mode 100644 index 44aa66947afb..000000000000 --- a/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md +++ /dev/null @@ -1,63 +0,0 @@ - -# ArangoDB Spark Connector - 
Getting Started - -## Maven - -```XML - - - com.arangodb - arangodb-spark-connector - 1.0.2 - - .... - -``` - -## SBT - -```Json -libraryDependencies += "com.arangodb" % "arangodb-spark-connector" % "1.0.2" -``` - -## Configuration - -| property-key | description | default value | -| ------------------------------ | -------------------------------------- | -------------- | -| arangodb.hosts | comma separated list of ArangoDB hosts | 127.0.0.1:8529 | -| arangodb.user | basic authentication user | root | -| arangodb.password | basic authentication password | | -| arangodb.protocol | network protocol | VST | -| arangodb.useSsl | use SSL connection | false | -| arangodb.ssl.keyStoreFile | SSL certificate keystore file | | -| arangodb.ssl.passPhrase | SSL pass phrase | | -| arangodb.ssl.protocol | SSL protocol | TLS | -| arangodb.maxConnections | max number of connections per host | 1 | -| arangodb.acquireHostList | auto acquire list of available hosts | false | -| arangodb.loadBalancingStrategy | load balancing strategy to be used | NONE | - -## Setup SparkContext - -**Scala** - -```Scala -val conf = new SparkConf() - .set("arangodb.hosts", "127.0.0.1:8529") - .set("arangodb.user", "myUser") - .set("arangodb.password", "myPassword") - ... - -val sc = new SparkContext(conf) -``` - -**Java** - -```Java -SparkConf conf = new SparkConf() - .set("arangodb.hosts", "127.0.0.1:8529") - .set("arangodb.user", "myUser") - .set("arangodb.password", "myPassword"); - ... - -JavaSparkContext sc = new JavaSparkContext(conf); -``` diff --git a/Documentation/Books/Drivers/SparkConnector/README.md b/Documentation/Books/Drivers/SparkConnector/README.md deleted file mode 100644 index 74af7cdb4f3f..000000000000 --- a/Documentation/Books/Drivers/SparkConnector/README.md +++ /dev/null @@ -1,6 +0,0 @@ - -# ArangoDB Spark Connector - -- [Getting Started](GettingStarted/README.md) -- [Reference](Reference/README.md) -- [Changelog](https://github.com/arangodb/arangodb-spark-connector/blob/master/ChangeLog.md#readme) diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/Java.md b/Documentation/Books/Drivers/SparkConnector/Reference/Java.md deleted file mode 100644 index 0e0295d0d06a..000000000000 --- a/Documentation/Books/Drivers/SparkConnector/Reference/Java.md +++ /dev/null @@ -1,224 +0,0 @@ - -# ArangoDB Spark Connector - Java Reference - -## ArangoSpark.save - -``` -ArangoSpark.save[T](rdd: JavaRDD[T], collection: String, options: WriteOptions) -``` - -``` -ArangoSpark.save[T](dataset: Dataset[T], collection: String, options: WriteOptions) -``` - -Save data from rdd into ArangoDB - -**Arguments** - -- **rdd**: `JavaRDD[T]` - - The rdd with the data to save - -- **collection**: `String` - - The collection to save in - -- **options**: `WriteOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -**Examples** - -```Java 
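// Note: this sketch assumes the JavaSparkContext below was created from a SparkConf
// that carries the arangodb.* connection properties shown in the Configuration section
// (e.g. arangodb.hosts, arangodb.user, arangodb.password).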
-JavaSparkContext sc = ... -List docs = ... -JavaRDD documents = sc.parallelize(docs); -ArangoSpark.save(documents, "myCollection", new WriteOptions().database("myDB")); -``` - -**Very Large Datasets** - -To prevent errors on very large datasets (over one million objects) use "repartition" for smaller chunks: - -```Java -ArangoSpark.save(allEdges.toJSON.repartition(20000), collection = "mio_edges", options = writeOptions) -``` - - -## ArangoSpark.saveDF - -``` -ArangoSpark.saveDF(dataframe: DataFrame, collection: String, options: WriteOptions) -``` - -Save data from dataframe into ArangoDB - -**Arguments** - -- **dataframe**: DataFrame` - - The dataFrame with the data to save - -- **collection**: `String` - - The collection to save in - -- **options**: `WriteOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -**Examples** - -```Java -JavaSparkContext sc = ... -List docs = ... -JavaRDD documents = sc.parallelize(docs); -SQLContext sql = SQLContext.getOrCreate(sc); -DataFrame df = sql.createDataFrame(documents, MyBean.class); -ArangoSpark.saveDF(documents, "myCollection", new WriteOptions().database("myDB")); -``` - -## ArangoSpark.load - -``` -ArangoSparkload[T](sparkContext: JavaSparkContext, collection: String, options: ReadOptions, clazz: Class[T]): ArangoJavaRDD[T] -``` - -Load data from ArangoDB into rdd - -**Arguments** - -- **sparkContext**: `JavaSparkContext` - - The sparkContext containing the ArangoDB configuration - -- **collection**: `String` - - The collection to load data from - -- **options**: `ReadOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -- **clazz**: `Class[T]` - - The type of the document - -**Examples** - -```Java -JavaSparkContext sc = ... -ArangoJavaRDD rdd = ArangoSpark.load(sc, "myCollection", new ReadOptions().database("myDB"), MyBean.class); -``` - -## ArangoRDD.filter - -``` -ArangoJavaRDD.filter(condition: String): ArangoJavaRDD[T] -``` - -Adds a filter condition. If used multiple times, the conditions will be combined with a logical AND. - -**Arguments** - -- **condition**: `String` - - The condition for the filter statement. Use `doc` inside to reference the document. e.g. 
`"doc.name == 'John'"` - -**Examples** - -```Java -JavaSparkContext sc = ... -ArangoJavaRDD rdd = ArangoSpark.load(sc, "myCollection", new ReadOptions().database("myDB"), MyBean.class); -ArangoJavaRDD rddFiltered = rdd.filter("doc.test <= 50"); -``` diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/README.md b/Documentation/Books/Drivers/SparkConnector/Reference/README.md deleted file mode 100644 index 83c7c0325651..000000000000 --- a/Documentation/Books/Drivers/SparkConnector/Reference/README.md +++ /dev/null @@ -1,5 +0,0 @@ - -# ArangoDB Spark Connector - Reference - -- [Scala](Scala.md) -- [Java](Java.md) diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md b/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md deleted file mode 100644 index 6f7816a8335c..000000000000 --- a/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md +++ /dev/null @@ -1,208 +0,0 @@ - -# ArangoDB Spark Connector - Scala Reference - -## ArangoSpark.save - -``` -ArangoSpark.save[T](rdd: RDD[T], collection: String, options: WriteOptions) -``` - -``` -ArangoSpark.save[T](dataset: Dataset[T], collection: String, options: WriteOptions) -``` - -Save data from rdd or dataset into ArangoDB - -**Arguments** - -- **rdd**/**dataset**: `RDD[T]` or `Dataset[T]` - - The rdd or dataset with the data to save - -- **collection**: `String` - - The collection to save in - -- **options**: `WriteOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -**Examples** - -```Scala -val sc: SparkContext = ... -val documents = sc.parallelize((1 to 100).map { i => MyBean(i) }) -ArangoSpark.save(documents, "myCollection", WriteOptions("myDB")) -``` - -## ArangoSpark.saveDF - -``` -ArangoSpark.saveDF(dataframe: DataFrame, collection: String, options: WriteOptions) -``` - -Save data from dataframe into ArangoDB - -**Arguments** - -- **dataframe**: DataFrame` - - The dataFrame with the data to save - -- **collection**: `String` - - The collection to save in - -- **options**: `WriteOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -**Examples** - -```Scala -val sc: SparkContext = ... 
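// MyBean is assumed to be a simple entity class (not defined in this reference) whose
// fields are mapped to document attributes when the RDD is saved below.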
-val documents = sc.parallelize((1 to 100).map { i => MyBean(i) }) -val sql: SQLContext = SQLContext.getOrCreate(sc); -val df = sql.createDataFrame(documents, classOf[MyBean]) -ArangoSpark.saveDF(df, "myCollection", WriteOptions("myDB")) -``` - -## ArangoSpark.load - -``` -ArangoSpark.load[T: ClassTag](sparkContext: SparkContext, collection: String, options: ReadOptions): ArangoRDD[T] -``` - -Load data from ArangoDB into rdd - -**Arguments** - -- **sparkContext**: `SparkContext` - - The sparkContext containing the ArangoDB configuration - -- **collection**: `String` - - The collection to load data from - -- **options**: `ReadOptions` - - - **database**: `String` - - Database to write into - - - **hosts**: `String` - - Alternative hosts to context property `arangodb.hosts` - - - **user**: `String` - - Alternative user to context property `arangodb.user` - - - **password**: `String` - - Alternative password to context property `arangodb.password` - - - **useSsl**: `Boolean` - - Alternative useSsl to context property `arangodb.useSsl` - - - **sslKeyStoreFile**: `String` - - Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile` - - - **sslPassPhrase**: `String` - - Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase` - - - **sslProtocol**: `String` - - Alternative sslProtocol to context property `arangodb.ssl.protocol` - -**Examples** - -```Scala -val sc: SparkContext = ... -val rdd = ArangoSpark.load[MyBean](sc, "myCollection", ReadOptions("myDB")) -``` - -## ArangoRDD.filter - -``` -ArangoRDD.filter(condition: String): ArangoRDD[T] -``` - -Adds a filter condition. If used multiple times, the conditions will be combined with a logical AND. - -**Arguments** - -- **condition**: `String` - - The condition for the filter statement. Use `doc` inside to reference the document. e.g. `"doc.name == 'John'"` - -**Examples** - -```Scala -val sc: SparkContext = ... -val rdd = ArangoSpark.load[MyBean](sc, "myCollection").filter("doc.name == 'John'") -``` diff --git a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md b/Documentation/Books/Drivers/SpringData/GettingStarted/README.md deleted file mode 100644 index 5e9ae5101e13..000000000000 --- a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md +++ /dev/null @@ -1,119 +0,0 @@ - -# Spring Data ArangoDB - Getting Started - -## Supported versions - -| Spring Data ArangoDB | Spring Data | ArangoDB | -| -------------------- | ----------- | ----------- | -| 1.3.x | 1.13.x | 3.0\*, 3.1+ | -| 2.3.x | 2.0.x | 3.0\*, 3.1+ | -| 3.0.x | 2.0.x | 3.0\*, 3.1+ | - -Spring Data ArangoDB requires ArangoDB 3.0 or higher - which you can download [here](https://www.arangodb.com/download/) - and Java 8 or higher. - -**Note**: ArangoDB 3.0 does not support the default transport protocol -[VelocyStream](https://github.com/arangodb/velocystream). A manual switch to -HTTP is required. See chapter [configuration](#configuration). Also ArangoDB 3.0 -does not support geospatial queries. - -## Maven - -To use Spring Data ArangoDB in your project, your build automation tool needs to be configured to include and use the Spring Data ArangoDB dependency. Example with Maven: - -```xml - - com.arangodb - arangodb-spring-data - 3.1.0 - -``` - -There is a [demonstration app](https://github.com/arangodb/spring-data-demo), which contains common use cases and examples of how to use Spring Data ArangoDB's functionality. - -## Configuration - -You can use Java to configure your Spring Data environment as show below. 
Setting up the underlying driver (`ArangoDB.Builder`) with default configuration automatically loads a properties file `arangodb.properties`, if it exists in the classpath. - -```java -@Configuration -@EnableArangoRepositories(basePackages = { "com.company.mypackage" }) -public class MyConfiguration extends AbstractArangoConfiguration { - - @Override - public ArangoDB.Builder arango() { - return new ArangoDB.Builder(); - } - - @Override - public String database() { - // Name of the database to be used - return "example-database"; - } - -} -``` - -The driver is configured with some default values: - -| property-key | description | default value | -| ----------------- | ----------------------------------- | ------------- | -| arangodb.host | ArangoDB host | 127.0.0.1 | -| arangodb.port | ArangoDB port | 8529 | -| arangodb.timeout | socket connect timeout(millisecond) | 0 | -| arangodb.user | Basic Authentication User | -| arangodb.password | Basic Authentication Password | -| arangodb.useSsl | use SSL connection | false | - -To customize the configuration, the parameters can be changed in the Java code. - -```java -@Override -public ArangoDB.Builder arango() { - ArangoDB.Builder arango = new ArangoDB.Builder() - .host("127.0.0.1") - .port(8529) - .user("root"); - return arango; -} -``` - -In addition you can use the _arangodb.properties_ or a custom properties file to supply credentials to the driver. - -_Properties file_ - -``` -arangodb.hosts=127.0.0.1:8529 -arangodb.user=root -arangodb.password= -``` - -_Custom properties file_ - -```java -@Override -public ArangoDB.Builder arango() { - InputStream in = MyClass.class.getResourceAsStream("my.properties"); - ArangoDB.Builder arango = new ArangoDB.Builder() - .loadProperties(in); - return arango; -} -``` - -**Note**: When using ArangoDB 3.0 it is required to set the transport protocol to HTTP and fetch the dependency `org.apache.httpcomponents:httpclient`. - -```java -@Override -public ArangoDB.Builder arango() { - ArangoDB.Builder arango = new ArangoDB.Builder() - .useProtocol(Protocol.HTTP_JSON); - return arango; -} -``` - -```xml - - org.apache.httpcomponents - httpclient - 4.5.1 - -``` diff --git a/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md b/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md deleted file mode 100644 index e571f44bc9a7..000000000000 --- a/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md +++ /dev/null @@ -1,4 +0,0 @@ - -# Migrating Spring Data ArangoDB 1.x to 3.0 - -see [Migrating 2.x to 3.0](Migrating-2.x-3.0.md) diff --git a/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md b/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md deleted file mode 100644 index 2b99f88a4563..000000000000 --- a/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md +++ /dev/null @@ -1,42 +0,0 @@ - -# Migrating Spring Data ArangoDB 2.x to 3.0 - -## Annotations @Key - -The annotation `@Key` is removed. Use `@Id` instead. - -## Annotations @Id - -The annotation `@Id` is now saved in the database as field `_key` instead of `_id`. All operations in `ArangoOperations` and `ArangoRepository` still work with `@Id` and also now supports non-String fields. - -If you - for some reason - need the value of `_id` within your application, you can use the annotation `@ArangoId` on a `String` field instead of `@Id`. - -**Note**: The field annotated with `@ArangoId` will not be persisted in the database. It only exists for reading purposes. 
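As a minimal illustration of the note above (the `Customer` entity and its field names are assumptions, not part of the migration guide, and the usual `@Id` from Spring Data plus `@ArangoId`/`@Document` from the ArangoDB annotations are assumed to be imported), an entity can keep its key in an `@Id` field and expose the read-only `_id` through a separate `@ArangoId` field:

```Java
@Document("customers")
public class Customer {

  @Id
  private String key;      // persisted as the system field _key

  @ArangoId
  private String arangoId; // read-only _id value (e.g. "customers/123"), not persisted

  // getters and setters omitted
}
```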
- -## ArangoRepository - -`ArangoRepository` now requires a second generic type. This type `ID` represents the type of your domain object field annotated with `@Id`. - -**Examples** - -```Java -public class Customer { - @Id private String id; -} - -public interface CustomerRepository extends ArangoRepository<Customer, String> { - -} -``` - -## Annotation @Param - -The annotation `com.arangodb.springframework.annotation.Param` is removed. Use `org.springframework.data.repository.query.Param` instead. - -## DBEntity - -`DBEntity` is removed. Use `VPackSlice` in your converter instead. - -## DBCollectionEntity - -`DBCollectionEntity` is removed. Use `VPackSlice` in your converter instead. diff --git a/Documentation/Books/Drivers/SpringData/Migration/README.md b/Documentation/Books/Drivers/SpringData/Migration/README.md deleted file mode 100644 index 5e036a2726ac..000000000000 --- a/Documentation/Books/Drivers/SpringData/Migration/README.md +++ /dev/null @@ -1,5 +0,0 @@ - -# Spring Data ArangoDB - Migration - -- [Migrating 1.x to 3.0](Migrating-1.x-3.0.md) -- [Migrating 2.x to 3.0](Migrating-2.x-3.0.md) diff --git a/Documentation/Books/Drivers/SpringData/README.md b/Documentation/Books/Drivers/SpringData/README.md deleted file mode 100644 index 4ac5dacfa655..000000000000 --- a/Documentation/Books/Drivers/SpringData/README.md +++ /dev/null @@ -1,14 +0,0 @@ - -# Spring Data ArangoDB - -- [Getting Started](GettingStarted/README.md) -- [Reference](Reference/README.md) -- [Migration](Migration/README.md) - -## Learn more - -- [ArangoDB](https://www.arangodb.com/) -- [Demo](https://github.com/arangodb/spring-data-demo) -- [JavaDoc 1.0.0](http://arangodb.github.io/spring-data/javadoc-1_0/index.html) -- [JavaDoc 2.0.0](http://arangodb.github.io/spring-data/javadoc-2_0/index.html) -- [Changelog](https://github.com/arangodb/spring-data/blob/master/ChangeLog.md#changelog) diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md deleted file mode 100644 index efafff04091f..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md +++ /dev/null @@ -1,78 +0,0 @@ - -# Auditing - -Since version 3.0.0 Spring Data ArangoDB provides basic auditing functionality that lets you track who changed your data and when. - -To enable auditing you have to add the annotation `@EnableArangoAuditing` to your configuration class. - -```Java -@Configuration -@EnableArangoAuditing -public class MyConfiguration extends AbstractArangoConfiguration { -``` - -We can now add fields to our model classes and annotate them with `@CreatedDate`, `@CreatedBy`, `@LastModifiedDate` and `@LastModifiedBy` to store the auditing information. All annotation names should be self-explanatory. - -```Java -@Document -public class MyEntity { - - @CreatedDate - private Instant created; - - @CreatedBy - private User createdBy; - - @LastModifiedDate - private Instant modified; - - @LastModifiedBy - private User modifiedBy; - -} -``` - -The annotations `@CreatedDate` and `@LastModifiedDate` work with fields of any date/timestamp type supported by Spring Data (e.g. `java.util.Date`, `java.time.Instant`, `java.time.LocalDateTime`). - -For `@CreatedBy` and `@LastModifiedBy` we need to provide Spring Data with the current auditor (i.e. `User` in our case). 
We can do so by implementing the `AuditorAware` interface - -```Java -public class AuditorProvider implements AuditorAware { - @Override - public Optional getCurrentAuditor() { - // return current user - } -} -``` - -and add the implementation as a bean to our Spring context. - -```Java -@Configuration -@EnableArangoAuditing(auditorAwareRef = "auditorProvider") -public class MyConfiguration extends AbstractArangoConfiguration { - - @Bean - public AuditorAware auditorProvider() { - return new AuditorProvider(); - } - -} -``` - -If you use a type in your `AuditorAware` implementation, which will be also persisted in your database and you only want to save a reference in your entity, just add the [@Ref annotation](Reference.md) to the fields annotated with `@CreatedBy` and `@LastModifiedBy`. Keep in mind that you have to save the `User` in your database first to get a valid reference. - -```Java -@Document -public class MyEntity { - - @Ref - @CreatedBy - private User createdBy; - - @Ref - @LastModifiedBy - private User modifiedBy; - -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md deleted file mode 100644 index 5e951dd2e160..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md +++ /dev/null @@ -1,45 +0,0 @@ - -# Converter - -## Registering a Spring Converter - -The `AbstractArangoConfiguration` provides a convenient way to register Spring `Converter` by overriding the method `customConverters()`. - -**Examples** - -```Java -@Configuration -public class MyConfiguration extends AbstractArangoConfiguration { - - @Override - protected Collection> customConverters() { - Collection> converters = new ArrayList<>(); - converters.add(new MyConverter()); - return converters; - } - -} -``` - -## Implementing a Spring Converter - -A `Converter` is used for reading if the source type is of type `VPackSlice` or `DBDocumentEntity`. - -A `Converter` is used for writing if the target type is of type `VPackSlice`, `DBDocumentEntity`, `BigInteger`, `BigDecimal`, `java.sql.Date`, `java.sql.Timestamp`, `Instant`, `LocalDate`, `LocalDateTime`, `OffsetDateTime`, `ZonedDateTime`, `Boolean`, `Short`, `Integer`, `Byte`, `Float`, `Double`, `Character`, `String`, `Date`, `Class`, `Enum`, `boolean[]`, `long[]`, `short[]`, `int[]`, `byte[]`, `float[]`, `double[]` or `char[]`. - -**Examples** - -```Java -public class MyConverter implements Converter { - - @Override - public VPackSlice convert(final MyObject source) { - VPackBuilder builder = new VPackBuilder(); - // map fields of MyObject to builder - return builder.slice(); - } - -} -``` - -For performance reasons `VPackSlice` should always be used within a converter. If your object is too complexe, you can also use `DBDocumentEntity` to simplify the mapping. diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md deleted file mode 100644 index 020f60ae35ea..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md +++ /dev/null @@ -1,81 +0,0 @@ - -# Document - -## Annotation @Document - -The annotations `@Document` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Document` specifies the collection type to `DOCUMENT`. 
- -```java -@Document(value="persons") -public class Person { - ... -} -``` - -## Spring Expression support - -Spring Data ArangoDB supports the use of SpEL expressions within `@Document#value`. This feature lets you define a dynamic collection name which can be used to implement multi tenancy applications. - -```Java -@Component -public class TenantProvider { - - public String getId() { - // threadlocal lookup - } - -} -``` - -```java -@Document("#{tenantProvider.getId()}_persons") -public class Person { - ... -} -``` - -## Annotation @From and @To - -With the annotations `@From` and `@To` applied on a collection or array field in a class annotated with `@Document` the nested edge objects are fetched from the database. Each of the nested edge objects has to be stored as separate edge document in the edge collection described in the `@Edge` annotation of the nested object class with the _\_id_ of the parent document as field _\_from_ or _\_to_. - -```java -@Document("persons") -public class Person { - @From - private List relations; -} - -@Edge(name="relations") -public class Relation { - ... -} -``` - -The database representation of `Person` in collection _persons_ looks as follow: - -``` -{ - "_key" : "123", - "_id" : "persons/123" -} -``` - -and the representation of `Relation` in collection _relations_: - -``` -{ - "_key" : "456", - "_id" : "relations/456", - "_from" : "persons/123" - "_to" : ".../..." -} -{ - "_key" : "789", - "_id" : "relations/456", - "_from" : "persons/123" - "_to" : ".../..." -} -... -``` - -**Note**: Since arangodb-spring-data 3.0.0 the annotations `@From` and `@To` also work on non-collection/non-array fields. If multiple edges are linked with the entity, it is not guaranteed that the same edge is returned every time. Use at your own risk. diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md deleted file mode 100644 index f5b8453c9f5a..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md +++ /dev/null @@ -1,81 +0,0 @@ - -# Edge - -## Annotation @Edge - -The annotations `@Edge` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Edge` specifies the collection type to `EDGE`. - -```java -@Edge("relations") -public class Relation { - ... -} -``` - -## Spring Expression support - -Spring Data ArangoDB supports the use of SpEL expressions within `@Edge#value`. This feature lets you define a dynamic collection name which can be used to implement multi tenancy applications. - -```Java -@Component -public class TenantProvider { - - public String getId() { - // threadlocal lookup - } - -} -``` - -```java -@Edge("#{tenantProvider.getId()}_relations") -public class Relation { - ... -} -``` - -## Annotation @From and @To - -With the annotations `@From` and `@To` applied on a field in a class annotated with `@Edge` the nested object is fetched from the database. The nested object has to be stored as a separate document in the collection described in the `@Document` annotation of the nested object class. The _\_id_ field of this nested object is stored in the fields `_from` or `_to` within the edge document. 
- -```java -@Edge("relations") -public class Relation { - @From - private Person c1; - @To - private Person c2; -} - -@Document(value="persons") -public class Person { - @Id - private String id; -} -``` - -The database representation of `Relation` in collection _relations_ looks as follow: - -``` -{ - "_key" : "123", - "_id" : "relations/123", - "_from" : "persons/456", - "_to" : "persons/789" -} -``` - -and the representation of `Person` in collection _persons_: - -``` -{ - "_key" : "456", - "_id" : "persons/456", -} -{ - "_key" : "789", - "_id" : "persons/789", -} -``` - -**Note:** If you want to save an instance of `Relation`, both `Person` objects (from & to) already have to be persisted and the class `Person` needs a field with the annotation `@Id` so it can hold the persisted `_id` from the database. diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md deleted file mode 100644 index eb78546849ee..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md +++ /dev/null @@ -1,36 +0,0 @@ - -# Events - -Spring Data ArangoDB includes several `ApplicationEvent` events that your application can respond to by registering subclasses of `AbstractArangoEventListener` in the ApplicationContext. - -The following callback methods are present in `AbstractArangoEventListener`: - -- `onAfterLoad`: Called in `ArangoTemplate#find` and `ArangoTemplate#query` after the object is loaded from the database. -- `onBeforeSave`: Called in `ArangoTemplate#insert`/`#update`/`#replace` before the object is converted and send to the database. -- `onAfterSave`: Called in `ArangoTemplate#insert`/`#update`/`#replace` after the object is send to the database. -- `onBeforeDelete`: Called in `ArangoTemplate#delete` before the object is converted and send to the database. -- `onAfterDelete`: Called in `ArangoTemplate#delete` after the object is deleted from the database. - -**Examples** - -```Java -package my.mapping.events; - -public class BeforePersonSavedListener extends AbstractArangoEventListener { - - @Override - public void onBeforeSave(BeforeSaveEvent event) { - // do some logging or data manipulation - } - -} -``` - -To register the listener add `@ComponentScan` with the package of your listener to your configuration class. - -```Java -@Configuration -@ComponentScan("my.mapping.events") -public class MyConfiguration extends AbstractArangoConfiguration { - ... -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md deleted file mode 100644 index 5f17058d9629..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md +++ /dev/null @@ -1,105 +0,0 @@ - -# Indexes - -## Annotation @\Indexed - -With the `@Indexed` annotations user defined indexes can be created at a collection level by annotating single fields of a class. - -Possible `@Indexed` annotations are: - -- `@HashIndexed` -- `@SkiplistIndexed` -- `@PersistentIndexed` -- `@GeoIndexed` -- `@FulltextIndexed` - -The following example creates a hash index on the field `name` and a separate hash index on the field `age`: - -```java -public class Person { - @HashIndexed - private String name; - - @HashIndexed - private int age; -} -``` - -With the `@Indexed` annotations different indexes can be created on the same field. 
- -The following example creates a hash index and also a skiplist index on the field `name`: - -```java -public class Person { - @HashIndexed - @SkiplistIndexed - private String name; -} -``` - -## Annotation @\Index - -If the index should include multiple fields, the `@Index` annotations can be used on the type instead. - -Possible `@Index` annotations are: - -- `@HashIndex` -- `@SkiplistIndex` -- `@PersistentIndex` -- `@GeoIndex` -- `@FulltextIndex` - -The following example creates a single hash index on the fields `name` and `age`. Note that if a field is renamed in the database with `@Field`, the new field name must be used in the index declaration: - -```java -@HashIndex(fields = {"fullname", "age"}) -public class Person { - @Field("fullname") - private String name; - - private int age; -} -``` - -The `@Index` annotations can also be used to create an index on a nested field. - -The following example creates a single hash index on the fields `name` and `address.country`: - -```java -@HashIndex(fields = {"name", "address.country"}) -public class Person { - private String name; - - private Address address; -} -``` - -The `@Index` annotations and the `@Indexed` annotations can be used at the same time in one class. - -The following example creates a hash index on the fields `name` and `age` and a separate hash index on the field `age`: - -```java -@HashIndex(fields = {"name", "age"}) -public class Person { - private String name; - - @HashIndexed - private int age; -} -``` - -The `@Index` annotations can be used multiple times to create more than one index in this way. - -The following example creates a hash index on the fields `name` and `age` and a separate hash index on the fields `name` and `gender`: - -```java -@HashIndex(fields = {"name", "age"}) -@HashIndex(fields = {"name", "gender"}) -public class Person { - private String name; - - private int age; - - private Gender gender; -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md deleted file mode 100644 index 6e4dae3020fb..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md +++ /dev/null @@ -1,165 +0,0 @@ - -# Mapping - -In this section we will describe the features and conventions for mapping Java objects to documents and how to override those conventions with annotation-based mapping metadata. - -## Conventions - -- The Java class name is mapped to the collection name -- The non-static fields of a Java object are used as fields in the stored document -- The Java field name is mapped to the stored document field name -- All nested Java objects are stored as nested objects in the stored document -- The Java class needs a constructor which meets the following criteria: - - in case of a single constructor: - - a non-parameterized constructor or - - a parameterized constructor - - in case of multiple constructors: - - a non-parameterized constructor or - - a parameterized constructor annotated with `@PersistenceConstructor` - -## Type conventions - -ArangoDB uses [VelocyPack](https://github.com/arangodb/velocypack) as its internal storage format, which supports a large number of data types. In addition Spring Data ArangoDB offers - with the underlying Java driver - built-in converters to add additional types to the mapping. 
- -| Java type | VelocyPack type | -| ------------------------ | ----------------------------- | -| java.lang.String | string | -| java.lang.Boolean | bool | -| java.lang.Integer | signed int 4 bytes, smallint | -| java.lang.Long | signed int 8 bytes, smallint | -| java.lang.Short | signed int 2 bytes, smallint | -| java.lang.Double | double | -| java.lang.Float | double | -| java.math.BigInteger | string | -| java.math.BigDecimal | string | -| java.lang.Number | double | -| java.lang.Character | string | -| java.util.UUID | string | -| java.lang.byte[] | string (Base64) | -| java.util.Date | string (date-format ISO 8601) | -| java.sql.Date | string (date-format ISO 8601) | -| java.sql.Timestamp | string (date-format ISO 8601) | -| java.time.Instant | string (date-format ISO 8601) | -| java.time.LocalDate | string (date-format ISO 8601) | -| java.time.LocalDateTime | string (date-format ISO 8601) | -| java.time.OffsetDateTime | string (date-format ISO 8601) | -| java.time.ZonedDateTime | string (date-format ISO 8601) | - -## Type mapping - -As collections in ArangoDB can contain documents of various types, a mechanism to retrieve the correct Java class is required. The type information of properties declared in a class may not be enough to restore the original class (due to inheritance). If the declared complex type and the actual type do not match, information about the actual type is stored together with the document. This is necessary to restore the correct type when reading from the DB. Consider the following example: - -```java -public class Person { - private String name; - private Address homeAddress; - // ... - - // getters and setters omitted -} - -public class Employee extends Person { - private Address workAddress; - // ... - - // getters and setters omitted -} - -public class Address { - private final String street; - private final String number; - // ... - - public Address(String street, String number) { - this.street = street; - this.number = number; - } - - // getters omitted -} - -@Document -public class Company { - @Key - private String key; - private Person manager; - - // getters and setters omitted -} - -Employee manager = new Employee(); -manager.setName("Jane Roberts"); -manager.setHomeAddress(new Address("Park Avenue", "432/64")); -manager.setWorkAddress(new Address("Main Street", "223")); -Company comp = new Company(); -comp.setManager(manager); -``` - -The serialized document for the DB looks like this: - -```json -{ - "manager": { - "name": "Jane Roberts", - "homeAddress": { - "street": "Park Avenue", - "number": "432/64" - }, - "workAddress": { - "street": "Main Street", - "number": "223" - }, - "_class": "com.arangodb.Employee" - }, - "_class": "com.arangodb.Company" -} -``` - -Type hints are written for top-level documents (as a collection can contain different document types) as well as for every value if it's a complex type and a sub-type of the property type declared. `Map`s and `Collection`s are excluded from type mapping. Without the additional information about the concrete classes used, the document couldn't be restored in Java. The type information of the `manager` property is not enough to determine the `Employee` type. The `homeAddress` and `workAddress` properties have the same actual and defined type, thus no type hint is needed. - -### Customizing type mapping - -By default, the fully qualified class name is stored in the documents as a type hint. A custom type hint can be set with the `@TypeAlias("my-alias")` annotation on an entity. 
Make sure that it is an unique identifier across all entities. If we would add a `TypeAlias("employee")` annotation to the `Employee` class above, it would be persisted as `"_class": "employee"`. - -The default type key is `_class` and can be changed by overriding the `typeKey()` method of the `AbstractArangoConfiguration` class. - -If you need to further customize the type mapping process, the `arangoTypeMapper()` method of the configuration class can be overridden. The included `DefaultArangoTypeMapper` can be customized by providing a list of [`TypeInformationMapper`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/convert/TypeInformationMapper.html)s that create aliases from types and vice versa. - -In order to fully customize the type mapping process you can provide a custom type mapper implementation by extending the `DefaultArangoTypeMapper` class. - -### Deactivating type mapping - -To deactivate the type mapping process, you can return `null` from the `typeKey()` method of the `AbstractArangoConfiguration` class. No type hints are stored in the documents with this setting. If you make sure that each defined type corresponds to the actual type, you can disable the type mapping, otherwise it can lead to exceptions when reading the entities from the DB. - -## Annotations - -### Annotation overview - -| annotation | level | description | -| ----------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | -| @Document | class | marks this class as a candidate for mapping | -| @Edge | class | marks this class as a candidate for mapping | -| @Id | field | stores the field as the system field \_key | -| @Rev | field | stores the field as the system field \_rev | -| @Field("alt-name") | field | stores the field with an alternative name | -| @Ref | field | stores the \_id of the referenced document and not the nested document | -| @From | field | stores the \_id of the referenced document as the system field \_from | -| @To | field | stores the \_id of the referenced document as the system field \_to | -| @Relations | field | vertices which are connected over edges | -| @Transient | field, method, annotation | marks a field to be transient for the mapping framework, thus the property will not be persisted and not further inspected by the mapping framework | -| @PersistenceConstructor | constructor | marks a given constructor - even a package protected one - to use when instantiating the object from the database | -| @TypeAlias("alias") | class | set a type alias for the class when persisted to the DB | -| @HashIndex | class | describes a hash index | -| @HashIndexed | field | describes how to index the field | -| @SkiplistIndex | class | describes a skiplist index | -| @SkiplistIndexed | field | describes how to index the field | -| @PersistentIndex | class | describes a persistent index | -| @PersistentIndexed | field | describes how to index the field | -| @GeoIndex | class | describes a geo index | -| @GeoIndexed | field | describes how to index the field | -| @FulltextIndex | class | describes a fulltext index | -| @FulltextIndexed | field | describes how to index the field | -| @CreatedBy | field | Declares a field as the one representing the principal that created the entity containing the field. 
| -| @CreatedDate | field | Declares a field as the one representing the date the entity containing the field was created. | -| @LastModifiedBy | field | Declares a field as the one representing the principal that recently modified the entity containing the field. | -| @LastModifiedDate | field | Declares a field as the one representing the date the entity containing the field was recently modified. | diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md deleted file mode 100644 index 37e1e1b8449f..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md +++ /dev/null @@ -1,56 +0,0 @@ - -# Reference - -With the annotation `@Ref` applied on a field the nested object isn’t stored as a nested object in the document. The `_id` field of the nested object is stored in the document and the nested object has to be stored as a separate document in another collection described in the `@Document` annotation of the nested object class. To successfully persist an instance of your object the referencing field has to be null or it's instance has to provide a field with the annotation `@Id` including a valid id. - -**Examples** - -```java -@Document(value="persons") -public class Person { - @Ref - private Address address; -} - -@Document("addresses") -public class Address { - @Id - private String id; - private String country; - private String street; -} -``` - -The database representation of `Person` in collection _persons_ looks as follow: - -``` -{ - "_key" : "123", - "_id" : "persons/123", - "address" : "addresses/456" -} -``` - -and the representation of `Address` in collection _addresses_: - -``` -{ - "_key" : "456", - "_id" : "addresses/456", - "country" : "...", - "street" : "..." -} -``` - -Without the annotation `@Ref` at the field `address`, the stored document would look: - -``` -{ - "_key" : "123", - "_id" : "persons/123", - "address" : { - "country" : "...", - "street" : "..." - } -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md deleted file mode 100644 index f9eae9b7a2d8..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md +++ /dev/null @@ -1,21 +0,0 @@ - -# Relations - -With the annotation `@Relations` applied on a collection or array field in a class annotated with `@Document` the nested objects are fetched from the database over a graph traversal with your current object as the starting point. The most relevant parameter is `edge`. With `edge` you define the edge collection - which should be used in the traversal - using the class type. With the parameter `depth` you can define the maximal depth for the traversal (default 1) and the parameter `direction` defines whether the traversal should follow outgoing or incoming edges (default Direction.ANY). - -**Examples** - -```java -@Document(value="persons") -public class Person { - @Relations(edge=Relation.class, depth=1, direction=Direction.ANY) - private List friends; -} - -@Edge(name="relations") -public class Relation { - -} -``` - -**Note**: Since arangodb-spring-data 3.0.0 the annotation `@Relations` also work on non-collection/non-array fields. If multiple documents are linked with the entity, it is not guaranteed that the same document is returned every time. Use at your own risk. 
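For the non-collection case described in the note, a minimal sketch could look like the following; it reuses the `Relation` edge class from the example above, and the field name `anyFriend` is purely illustrative:

```java
@Document(value = "persons")
public class Person {
    // Single-valued @Relations field (possible since arangodb-spring-data 3.0.0).
    // If more than one document is connected through "relations" edges,
    // it is not guaranteed which of them is returned here.
    @Relations(edge = Relation.class, depth = 1, direction = Direction.ANY)
    private Person anyFriend;
}
```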
diff --git a/Documentation/Books/Drivers/SpringData/Reference/README.md b/Documentation/Books/Drivers/SpringData/Reference/README.md deleted file mode 100644 index d612c3ef07ef..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/README.md +++ /dev/null @@ -1,25 +0,0 @@ - -# Spring Data ArangoDB - Reference - -- [Template](Template/README.md) - - [Queries](Template/Queries.md) - - [Document Manipulation](Template/DocumentManipulation.md) - - [Multiple Document Manipulation](Template/MultiDocumentManipulation.md) - - [Collection Manipulation](Template/CollectionManipulation.md) -- [Repositories](Repositories/README.md) - - [Queries](Repositories/Queries/README.md) - - [Derived queries](Repositories/Queries/DerivedQueries.md) - - [Query methods](Repositories/Queries/QueryMethods.md) - - [Named queries](Repositories/Queries/NamedQueries.md) - - [Document Manipulation](Repositories/DocumentManipulation.md) - - [Multiple Document Manipulation](Repositories/MultiDocumentManipulation.md) - - [Query by example](Repositories/QueryByExample.md) -- [Mapping](Mapping/README.md) - - [Document](Mapping/Document.md) - - [Edge](Mapping/Edge.md) - - [Reference](Mapping/Reference.md) - - [Relations](Mapping/Relations.md) - - [Indexes](Mapping/Indexes.md) - - [Converter](Mapping/Converter.md) - - [Events](Mapping/Events.md) - - [Auditing](Mapping/Auditing.md) diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md deleted file mode 100644 index b3f97cc0042d..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Manipulating documents - -## ArangoRepository.existsById - -``` -ArangoRepository.existsById(ID id) : boolean -``` - -Returns whether an entity with the given id exists. - -**Arguments** - -- **id**: `ID` - - The id (`_key`) of the document. Must not be `null`. - -**Examples** - -```Java -@Autowired MyRepository repository; - -boolean exists = repository.existsById("some-id"); -``` - -## ArangoRepository.findById - -``` -ArangoRepository.findById(ID id) : Optional -``` - -Retrieves an entity by its id. - -**Arguments** - -- **id**: `ID` - - The id (`_key`) of the document. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -Optional entity = repository.findById("some-id"); -``` - -## ArangoRepository.save - -``` -ArangoRepository.save(S entity) : S -``` - -Saves a given entity. Use the returned instance for further operations as the save operation might have changed the entity instance completely. - -**Arguments** - -- **entity**: `S` - - The entity to save in the database. Must not be `null`. - -```java -@Autowired MyRepository repository; - -MyDomainClass entity = new MyDomainClass(); -entity = repository.save(entity); -``` - -## ArangoRepository.deleteById - -``` -ArangoRepository.deleteById(ID id) : void -``` - -Deletes the entity with the given id. - -**Arguments** - -- **id**: `ID` - - The id (`_key`) of the document. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -repository.deleteById("some-id"); -``` - -## ArangoRepository.delete - -``` -ArangoRepository.delete(T entity) : void -``` - -Deletes a given entity. - -**Arguments** - -- **entity**: `T` - - The entity to delete. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass entity = ... 
-repository.delete(entity); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md deleted file mode 100644 index 36f1717b2e05..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md +++ /dev/null @@ -1,106 +0,0 @@ - -# Manipulating multiple documents - -## ArangoRepository.findAll - -``` -ArangoRepository.findAll() : Iterable -``` - -Returns all instances of the type. - -**Examples** - -```Java -@Autowired MyRepository repository; - -Iterable entities = repository.findAll(); -``` - -## ArangoRepository.findAllById - -``` -ArangoRepository.findAllById(Iterable ids) : Iterable -``` - -Returns all instances of the type with the given IDs. - -**Arguments** - -- **ids**: `Iterable` - - The ids (`_keys`) of the documents - -**Examples** - -```java -@Autowired MyRepository repository; - -Iterable entities = repository.findAllById(Arrays.asList("some-id", "some-other-id")); -``` - -## ArangoRepository.saveAll - -``` -ArangoRepository.saveAll(Iterable entities) : Iterable -``` - -Saves all given entities. - -**Arguments** - -- **entities**: `Iterable` - - A list of entities to save. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass obj1 = ... -MyDomainClass obj2 = ... -MyDomainClass obj3 = ... -repository.saveAll(Arrays.asList(obj1, obj2, obj3)) -``` - -## ArangoRepository.deleteAll (method 1) - -``` -ArangoRepository.deleteAll() : void -``` - -Deletes all entities managed by the repository. - -**Examples** - -```java -@Autowired MyRepository repository; - -repository.deleteAll(); -``` - -## ArangoRepository.deleteAll (method 2) - -``` -ArangoRepository.deleteAll(Iterable entities) : void -``` - -Deletes the given entities. - -**Arguments** - -- **entities**: `Iterable` - - The entities to delete. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass obj1 = ... -MyDomainClass obj2 = ... -MyDomainClass obj3 = ... -repository.deleteAll(Arrays.asList(obj1, obj2, obj3)) -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md deleted file mode 100644 index fa16da5b85c4..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md +++ /dev/null @@ -1,157 +0,0 @@ - -# Derived queries - -## Semantic parts - -Spring Data ArangoDB supports queries derived from methods names by splitting it into its semantic parts and converting into AQL. The mechanism strips the prefixes `find..By`, `get..By`, `query..By`, `read..By`, `stream..By`, `count..By`, `exists..By`, `delete..By`, `remove..By` from the method and parses the rest. The `By` acts as a separator to indicate the start of the criteria for the query to be built. You can define conditions on entity properties and concatenate them with `And` and `Or`. 
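Besides `find..By`, the other prefixes follow the same parsing rules. A hedged sketch, assuming a `Customer` entity with `name` and `age` properties and a `String` key (return types follow the usual Spring Data conventions):

```java
public interface CustomerRepository extends ArangoRepository<Customer, String> {

    // count..By builds the same criteria as find..By and returns the match count
    long countByAge(int age);

    // exists..By returns whether at least one matching document exists
    boolean existsByName(String name);

    // remove..By / delete..By delete the matching documents
    void removeByName(String name);
}
```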
- -The complete list of part types for derived methods is below, where `doc` is a document in the database - -| Keyword | Sample | Predicate | -| ------------------------------------------- | -------------------------------------- | -------------------------------------- | -| IsGreaterThan, GreaterThan, After | findByAgeGreaterThan(int age) | doc.age > age | -| IsGreaterThanEqual, GreaterThanEqual | findByAgeIsGreaterThanEqual(int age) | doc.age >= age | -| IsLessThan, LessThan, Before | findByAgeIsLessThan(int age) | doc.age < age | -| IsLessThanEqualLessThanEqual | findByAgeLessThanEqual(int age) | doc.age <= age | -| IsBetween, Between | findByAgeBetween(int lower, int upper) | lower < doc.age < upper | -| IsNotNull, NotNull | findByNameNotNull() | doc.name != null | -| IsNull, Null | findByNameNull() | doc.name == null | -| IsLike, Like | findByNameLike(String name) | doc.name LIKE name | -| IsNotLike, NotLike | findByNameNotLike(String name) | NOT(doc.name LIKE name) | -| IsStartingWith, StartingWith, StartsWith | findByNameStartsWith(String prefix) | doc.name LIKE prefix | -| IsEndingWith, EndingWith, EndsWith | findByNameEndingWith(String suffix) | doc.name LIKE suffix | -| Regex, MatchesRegex, Matches | findByNameRegex(String pattern) | REGEX_TEST(doc.name, name, ignoreCase) | -| (No Keyword) | findByFirstName(String name) | doc.name == name | -| IsTrue, True | findByActiveTrue() | doc.active == true | -| IsFalse, False | findByActiveFalse() | doc.active == false | -| Is, Equals | findByAgeEquals(int age) | doc.age == age | -| IsNot, Not | findByAgeNot(int age) | doc.age != age | -| IsIn, In | findByNameIn(String[] names) | doc.name IN names | -| IsNotIn, NotIn | findByNameIsNotIn(String[] names) | doc.name NOT IN names | -| IsContaining, Containing, Contains | findByFriendsContaining(String name) | name IN doc.friends | -| IsNotContaining, NotContaining, NotContains | findByFriendsNotContains(String name) | name NOT IN doc.friends | -| Exists | findByFriendNameExists() | HAS(doc.friend, name) | - -**Examples** - -```java -public interface MyRepository extends ArangoRepository { - - // FOR c IN customers FILTER c.name == @0 RETURN c - ArangoCursor findByName(String name); - ArangoCursor getByName(String name); - - // FOR c IN customers - // FILTER c.name == @0 && c.age == @1 - // RETURN c - ArangoCursor findByNameAndAge(String name, int age); - - // FOR c IN customers - // FILTER c.name == @0 || c.age == @1 - // RETURN c - ArangoCursor findByNameOrAge(String name, int age); -} -``` - -You can apply sorting for one or multiple sort criteria by appending `OrderBy` to the method and `Asc` or `Desc` for the directions. - -```java -public interface MyRepository extends ArangoRepository { - - // FOR c IN customers - // FILTER c.name == @0 - // SORT c.age DESC RETURN c - ArangoCursor getByNameOrderByAgeDesc(String name); - - // FOR c IN customers - // FILTER c.name = @0 - // SORT c.name ASC, c.age DESC RETURN c - ArangoCursor findByNameOrderByNameAscAgeDesc(String name); - -} -``` - -## Property expression - -Property expressions can refer only to direct and nested properties of the managed domain class. The algorithm checks the domain class for the entire expression as the property. If the check fails, the algorithm splits up the expression at the camel case parts from the right and tries to find the corresponding property. 
- -**Examples** - -```java -@Document("customers") -public class Customer { - private Address address; -} - -public class Address { - private ZipCode zipCode; -} - -public interface MyRepository extends ArangoRepository { - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // 3. step: search domain class for "address.zipCode" - ArangoCursor findByAddressZipCode(ZipCode zipCode); -} -``` - -It is possible for the algorithm to select the wrong property if the domain class also has a property which matches the first split of the expression. To resolve this ambiguity you can use `_` as a separator inside your method-name to define traversal points. - -**Examples** - -```java -@Document("customers") -public class Customer { - private Address address; - private AddressZip addressZip; -} - -public class Address { - private ZipCode zipCode; -} - -public class AddressZip { - private String code; -} - -public interface MyRepository extends ArangoRepository { - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // creates query with "x.addressZip.code" - ArangoCursor findByAddressZipCode(ZipCode zipCode); - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // 3. step: search domain class for "address.zipCode" - // creates query with "x.address.zipCode" - ArangoCursor findByAddress_ZipCode(ZipCode zipCode); - -} -``` - -## Geospatial queries - -Geospatial queries are a subsection of derived queries. To use a geospatial query on a collection, a geo index must exist on that collection. A geo index can be created on a field which is a two element array, corresponding to latitude and longitude coordinates. - -As a subsection of derived queries, geospatial queries support all the same return types, but also support the three return types `GeoPage, GeoResult and Georesults`. These types must be used in order to get the distance of each document as generated by the query. - -There are two kinds of geospatial query, Near and Within. Near sorts documents by distance from the given point, while within both sorts and filters documents, returning those within the given distance range or shape. - -**Examples** - -```java -public interface MyRepository extends ArangoRepository { - - GeoResult getByLocationNear(Point point); - - GeoResults findByLocationWithinOrLocationWithin(Box box, Polygon polygon); - - //Equivalent queries - GeoResults findByLocationWithinOrLocationWithin(Point point, int distance); - GeoResults findByLocationWithinOrLocationWithin(Point point, Distance distance); - GeoResults findByLocationWithinOrLocationWithin(Circle circle); - -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md deleted file mode 100644 index 7e45cf0798df..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md +++ /dev/null @@ -1,22 +0,0 @@ - -# Named queries - -An alternative to using the `@Query` annotation on methods is specifying them in a separate `.properties` file. The default path for the file is `META-INF/arango-named-queries.properties` and can be changed with the `EnableArangoRepositories#namedQueriesLocation()` setting. 
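As an illustration, such a configuration might look like this; the package name and the properties file location are only placeholders, and the methods required by `AbstractArangoConfiguration` are omitted as in the other configuration snippets:

```java
@Configuration
@EnableArangoRepositories(
    basePackages = "com.arangodb.repository",
    namedQueriesLocation = "classpath:META-INF/my-named-queries.properties")
public class MyConfiguration extends AbstractArangoConfiguration {
    ...
}
```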
The entries in the properties file must adhere to the following convention: `{simple entity name}.{method name} = {query}`. Let's assume we have the following repository interface: - -```java -package com.arangodb.repository; - -public interface CustomerRepository extends ArangoRepository { - - Customer findByUsername(@Param("username") String username); - -} -``` - -The corresponding `arango-named-queries.properties` file looks like this: - -```properties -Customer.findByUsername = FOR c IN customers FILTER c.username == @username RETURN c -``` - -The queries specified in the properties file are no different than the queries that can be defined with the `@Query` annotation. The only difference is that the queries are in one place. If there is a `@Query` annotation present and a named query defined, the query in the `@Query` annotation takes precedence. diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md deleted file mode 100644 index ffa111b737f2..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md +++ /dev/null @@ -1,85 +0,0 @@ - -# Query methods - -Queries using [ArangoDB Query Language (AQL)](https://docs.arangodb.com/current/AQL/index.html) can be supplied with the `@Query` annotation on methods. - -## Passing collection name - -Instead of writing the collection name statically into the query string, the placeholder `#collection` can be specified. - -```java -public interface MyRepository extends ArangoRepository{ - - // FOR c IN customer RETURN c - @Query("FOR c IN #collection RETURN c") - ArangoCursor query(); - -} -``` - -## Passing bind parameters - -There are three ways of passing bind parameters to the query in the query annotation. - -### Number matching - -Using number matching, arguments will be substituted into the query in the order they are passed to the query method. - -```java -public interface MyRepository extends ArangoRepository{ - - @Query("FOR c IN #collection FILTER c.name == @0 AND c.surname == @1 RETURN c") - ArangoCursor query(String name, String surname); - -} -``` - -### @Param - -With the `@Param` annotation, the argument will be placed in the query at the place corresponding to the value passed to the `@Param` annotation. - -```java -public interface MyRepository extends ArangoRepository{ - - @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c") - ArangoCursor query(@Param("name") String name, @Param("surname") String surname); - -} -``` - -### @BindVars - -In addition you can use a method parameter of type `Map` annotated with `@BindVars` as your bind parameters. You can then fill the map with any parameter used in the query. (see [here](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html#bind-parameters) for more Information about Bind Parameters). - -```java -public interface MyRepository extends ArangoRepository{ - - @Query("FOR c IN #collection FILTER c.name == @name AND c.surname = @surname RETURN c") - ArangoCursor query(@BindVars Map bindVars); - -} -``` - -A mixture of any of these methods can be used. Parameters with the same name from an `@Param` annotation will override those in the `bindVars`. 
- -```java -public interface MyRepository extends ArangoRepository{ - - @Query("FOR c IN #collection FILTER c.name == @name AND c.surname = @surname RETURN c") - ArangoCursor query(@BindVars Map bindVars, @Param("name") String name); - -} -``` - -## Query options - -`AqlQueryOptions` can also be passed to the driver, as an argument anywhere in the method signature. - -```java -public interface MyRepository extends ArangoRepository{ - - @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c") - ArangoCursor query(@Param("name") String name, @Param("surname") String surname, AqlQueryOptions options); - -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md deleted file mode 100644 index f60125ba7e7b..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md +++ /dev/null @@ -1,108 +0,0 @@ - -# Queries - -Spring Data ArangoDB supports three kinds of queries: - -- [Derived queries](DerivedQueries.md) -- [Query methods](QueryMethods.md) -- [Named queries](NamedQueries.md) - -## Return types - -The method return type for single results can be a primitive type, a domain class, `Map`, `BaseDocument`, `BaseEdgeDocument`, `Optional`, `GeoResult`. - -The method return type for multiple results can additionally be `ArangoCursor`, `Iterable`, `Collection`, `List`, `Set`, `Page`, `Slice`, `GeoPage`, `GeoResults` where Type can be everything a single result can be. - -## AQL query options - -You can set additional options for the query and the created cursor over the class `AqlQueryOptions` which you can simply define as a method parameter without a specific name. AqlQuery options can also be defined with the `@QueryOptions` annotation, as shown below. Aql query options from an annotation and those from an argument are merged if both exist, with those in the argument taking precedence. - -The `AqlQueryOptions` allows you to set the cursor time-to-live, batch-size, -caching flag and several other settings. This special parameter works with both -[query methods](QueryMethods.md) -and [derived queries](DerivedQueries.md). Keep in mind that some options, like -time-to-live, are only effective if the method return type is`ArangoCursor` -or `Iterable`. - -**Examples** - -```java -public interface MyRepository extends Repository { - - - @Query("FOR c IN #collection FILTER c.name == @0 RETURN c") - Iterable query(String name, AqlQueryOptions options); - - - Iterable findByName(String name, AqlQueryOptions options); - - - @QueryOptions(maxPlans = 1000, ttl = 128) - ArangoCursor findByAddressZipCode(ZipCode zipCode); - - - @Query("FOR c IN #collection FILTER c[@field] == @value RETURN c") - @QueryOptions(cache = true, ttl = 128) - ArangoCursor query(Map bindVars, AqlQueryOptions options); - -} -``` - -## Paging and sorting - -Spring Data ArangoDB supports Spring Data's `Pageable` and `Sort` parameters for repository query methods. If these parameters are used together with a native query, either through `@Query` annotation or [named queries](NamedQueries.md), a placeholder must be specified: - -- `#pageable` for `Pageable` parameter -- `#sort` for `Sort` parameter - -Sort properties or paths are attributes separated by dots (e.g. `customer.age`). Some rules apply for them: - -- they must not begin or end with a dot (e.g. 
`.customer.age`) -- dots in attributes are supported, but the whole attribute must be enclosed by backticks (e.g. `` customer.`attr.with.dots` `` -- backticks in attributes are supported, but they must be escaped with a backslash (e.g. `` customer.attr_with\` ``) -- any backslashes (that do not escape a backtick) are escaped (e.g. `customer\` => `customer\\`) - -**Examples** - -``` -just.`some`.`attributes.that`.`form\``.a path\`.\ is converted to -`just`.`some`.`attributes.that`.`form\``.`a path\``.`\\` -``` - -**Native queries example** - -```java -public interface CustomerRepository extends ArangoRepository { - - @Query("FOR c IN #collection FILTER c.name == @1 #pageable RETURN c") - Page findByNameNative(Pageable pageable, String name); - - @Query("FOR c IN #collection FILTER c.name == @1 #sort RETURN c") - List findByNameNative(Sort sort, String name); -} - -// don't forget to specify the var name of the document -final Pageable page = PageRequest.of(1, 10, Sort.by("c.age")); -repository.findByNameNative(page, "Matt"); - -final Sort sort = Sort.by(Direction.DESC, "c.age"); -repository.findByNameNative(sort, "Tony"); -``` - -**Derived queries example** - -```java -public interface CustomerRepository extends ArangoRepository { - - Page findByName(Pageable pageable, String name); - - List findByName(Sort sort, String name); -} - -// no var name is necessary for derived queries -final Pageable page = PageRequest.of(1, 10, Sort.by("age")); -repository.findByName(page, "Matt"); - -final Sort sort = Sort.by(Direction.DESC, "age"); -repository.findByName(sort, "Tony"); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/QueryByExample.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/QueryByExample.md deleted file mode 100644 index 7ab142dddd9b..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/QueryByExample.md +++ /dev/null @@ -1,98 +0,0 @@ - -# Query by example - -## ArangoRepository.exists - -``` -ArangoRepository.exists(Example example) : boolean -``` - -Checks whether the data store contains elements that match the given `Example`. - -**Arguments** - -- **example**: `Example` - - The example to use. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass sample = new MyDomainClass(); -sample.setName("John"); // set some data in sample -boolean exists = repository.exists(Example.of(sample)); -``` - -## ArangoRepository.findOne - -``` -ArangoRepository.findOne(Example example) : Optional -``` - -Returns a single entity matching the given `Example` or `Optional#empty()` if none was found. - -**Arguments** - -- **example**: `Example` - - The example to use. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass sample = new MyDomainClass(); -sample.setName("John"); // set some data in sample -MyDomainClass entity = repository.findOne(Example.of(sample)); -``` - -## ArangoRepository.findAll - -``` -ArangoRepository.findAll(Example example) : Iterable -``` - -Returns all entities matching the given `Example`. In case no match could be found an empty `Iterable` is returned. - -**Arguments** - -- **example**: `Example` - - The example to use. Must not be `null`. 
- -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass sample = new MyDomainClass(); -sample.setName("John"); // set some data in sample -Iterable entities = repository.findAll(Example.of(sample)); -``` - -## ArangoRepository.count - -``` -ArangoRepository.count(Example example) : long -``` - -Returns the number of instances matching the given `Example`. - -**Arguments** - -- **example**: `Example` - - The example to use. Must not be `null`. - -**Examples** - -```java -@Autowired MyRepository repository; - -MyDomainClass sample = new MyDomainClass(); -sample.setName("John"); // set some data in sample -long count = repository.count(Example.of(sample)); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/README.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/README.md deleted file mode 100644 index 6ea7ab39f14b..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/README.md +++ /dev/null @@ -1,34 +0,0 @@ - -# Repositories - -Spring Data Commons provides a composable repository infrastructure which Spring Data ArangoDB is built on. These allow for interface-based composition of repositories consisting of provided default implementations for certain interfaces (like `CrudRepository`) and custom implementations for other methods. - -The base interface of Spring Data ArangoDB is `ArangoRepository`. It extends the Spring Data interfaces `PagingAndSortingRepository` and `QueryByExampleExecutor`. To get access to all Sping Data ArangoDB repository functionallity simply create your own interface extending `ArangoRepository`. - -The type `T` represents your domain class and type `ID` the type of your field annotated with `@Id` in your domain class. This field is persistend in ArangoDB as document field `_key`. - -**Examples** - -```java -@Document -public class MyDomainClass { - @Id - private String id; - -} - -public interface MyRepository extends ArangoRepository { - -} -``` - -Instances of a Repository are created in Spring beans through the auto-wired mechanism of Spring. - -```java -public class MySpringBean { - - @Autowired - private MyRepository rep; - -} -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Template/CollectionManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Template/CollectionManipulation.md deleted file mode 100644 index 96e4afecce57..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Template/CollectionManipulation.md +++ /dev/null @@ -1,314 +0,0 @@ - -# Manipulating the collection - -## ArangoOperations.collection - -``` -ArangoOperations.collection(Class entityClass) : CollectionOperations -``` - -``` -ArangoOperations.collection(String name) : CollectionOperations -``` - -Returns the operations interface for a collection. If the collection does not exists, it is created automatically. 
- -**Arguments** - -- **entityClass**: `Class` - - The entity type representing the collection - -- **name**: `String` - - The name of the collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); - -// -- or -- - -CollectionOperations collection = template.collection("some-collection-name"); -``` - -## CollectionOperations.truncate - -``` -CollectionOperations.truncate() : void -``` - -Removes all documents from the collection, but leaves the indexes intact - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -collection.truncate(); -``` - -## CollectionOperations.drop - -``` -CollectionOperations.drop() : void -``` - -Deletes the collection from the database. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -collection.drop(); -``` - -## CollectionOperations.count - -``` -CollectionOperation.count() : long -``` - -Counts the documents in a collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -long count = collection.count(); -``` - -## CollectionOperations.getProperties - -``` -CollectionOperations.getProperties() : CollectionPropertiesEntity -``` - -Reads the properties of the specified collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -CollectionPropertiesEntity properties = collection.getProperties(); -``` - -## CollectionOperation.getIndexes - -``` -CollectionOperations.getIndexes() : Collection -``` - -Returns all indexes of the collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -Collection indexes = collection.getIndexes(); -``` - -## CollectionOperations.ensureHashIndex - -``` -CollectionOperations.ensureHashIndex(Iterable fields, HashIndexOptions options) : IndexEntity -``` - -Creates a hash index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `HashIndexOptions` - - - **unique**: `Boolean` - - If true, then create a unique index - - - **sparse**: `Boolean` - - If true, then create a sparse index - - - **deduplicate**: `Boolean` - - If false, the deduplication of array values is turned off. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -IndexEntity index = collection.ensureHashIndex(Arrays.asList("a", "b.c"), new HashIndexOptions()); -// the index has been created with the handle `index.getId()` -``` - -## CollectionOperations.ensureSkiplistIndex - -``` -CollectionOperations.ensureSkiplistIndex(Iterable fields, SkiplistIndexOptions options) : IndexEntity -``` - -Creates a skip-list index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `SkiplistIndexOptions` - - - **unique**: `Boolean` - - If true, then create a unique index - - - **sparse**: `Boolean` - - If true, then create a sparse index - - - **deduplicate**: `Boolean` - - If false, the deduplication of array values is turned off. 
- -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -IndexEntity index = collection.ensureSkiplistIndex(Arrays.asList("a", "b.c"), new SkiplistIndexOptions()); -// the index has been created with the handle `index.getId()` -``` - -## CollectionOperations.ensureGeoIndex - -``` -CollectionOperations.ensureGeoIndex(Iterable fields, GeoIndexOptions options) : IndexEntity -``` - -Creates a geo index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `GeoIndexOptions` - - - **geoJson**: `Boolean` - - If a geo-spatial index on a location is constructed and geoJson is true, then the order within the array is longitude followed by latitude. This corresponds to the format described in. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -IndexEntity index = collection.ensureGeoIndex(Arrays.asList("latitude", "longitude"), new GeoIndexOptions()); -// the index has been created with the handle `index.getId()` -``` - -## CollectionOperations.ensureFulltextIndex - -``` -CollectionOperations.ensureFulltextIndex(Iterable fields, FulltextIndexOptions options) : IndexEntity -``` - -Creates a fulltext index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `FulltextIndexOptions` - - - **minLength**: `Integer` - - Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -IndexEntity index = collection.ensureFulltextIndex(Arrays.asList("description"), new FulltextIndexOptions()); -// the index has been created with the handle `index.getId()` -``` - -## CollectionOperations.ensurePersistentIndex - -``` -CollectionOperations.ensurePersistentIndex(Iterable fields, PersistentIndexOptions options) : IndexEntity -``` - -Creates a persistent index for the collection if it does not already exist. - -**Arguments** - -- **fields**: `Iterable` - - A list of attribute paths - -- **options**: `PersistentIndexOptions` - - - **unique**: `Boolean` - - If true, then create a unique index - - - **sparse**: `Boolean` - - If true, then create a sparse index - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -IndexEntity index = collection.ensurePersistentIndex(Arrays.asList("a", "b.c"), new PersistentIndexOptions()); -// the index has been created with the handle `index.getId()` -``` - -## CollectionOperations.dropIndex - -``` -CollectionOperations.dropIndex(String id) : void -``` - -Deletes the index with the given _id_ from the collection. 
- -**Arguments** - -- **id**: `String` - - The index-handle - -**Examples** - -```Java -@Autowired ArangoOperations template; - -CollectionOperations collection = template.collection(MyObject.class); -collection.dropIndex("some-index"); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Template/DocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Template/DocumentManipulation.md deleted file mode 100644 index 7bd87d5cf3d0..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Template/DocumentManipulation.md +++ /dev/null @@ -1,286 +0,0 @@ - -# Manipulating documents - -## ArangoOperations.exists - -``` -ArangoOperations.exists(String id, Class entityClass) : boolean -``` - -Checks whether the document exists by reading a single document head - -**Arguments** - -- **id**: `String` - - The id or key of the document - -- **entityClass**: `Class` - - The entity class which represents the collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -boolean exists = template.exists("some-id", MyObject.class); -``` - -## ArangoOperations.find - -``` -ArangoOperations.find(String id, Class entityClass, DocumentReadOptions options) : Optional -``` - -Retrieves the document with the given _id_ from a collection. - -**Arguments** - -- **id**: `String` - - The id or key of the document - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentReadOptions` - - - **ifNoneMatch**: `String` - - Document revision must not contain If-None-Match - - - **ifMatch**: `String` - - Document revision must contain If-Match - - - **catchException**: `Boolean` - - Whether or not catch possible thrown exceptions - -**Examples** - -```Java -@Autowired ArangoOperations template; - -Optional doc = template.find("some-id", MyObject.class, new DocumentReadOptions()); -``` - -## ArangoOperations.repsert - -``` -ArangoOperations.repsert(T value) : void -``` - -Creates a new document from the given document, unless there is already a document with the id given. In that case it replaces the document. - -**Arguments** - -- **value**: `T` - - A representation of a single document - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject myObj = ... -template.repsert(myObj); -``` - -## ArangoOperations.insert - -``` -ArangoOperations.insert(T value, DocumentCreateOptions options) : DocumentEntity -``` - -Creates a new document from the given document, unless there is already a document with the \_key given. If no \_key is given, a new unique \_key is generated automatically. - -**Arguments** - -- **value**: `T` - - A representation of a single document - -- **options**: `DocumentCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **overwrite**: `Boolean` - - If set to true, the insert becomes a replace-insert. If a document with the same \_key already exists the new document is not rejected with unique constraint violated but will replace the old document. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. 
This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject myObj = ... -DocumentEntity info = template.insert(myObj, new DocumentCreateOptions()); -``` - -## ArangoOperations.replace - -``` -ArangoOperations.replace(String id, T value, DocumentReplaceOptions options) : DocumentEntity -``` - -Replaces the document with _id_ with the one in the body, provided there is such a document and no precondition is violated. - -**Arguments** - -- **id**: `String` - - The id or key of the document - -- **value**: `T` - - A representation of a single document - -- **options**: `DocumentReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given document is ignored. If this is set to false, then the \_rev attribute given in the body document is taken as a precondition. The document is only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject myObj = ... -DocumentEntity info = template.replace("some-id", myObj, new DocumentReplaceOptions()); -``` - -## ArangoOperations.update - -``` -ArangoOperations.update(String id, T value, DocumentUpdateOptions options) : DocumentEntity -``` - -Partially updates the document identified by document id or key. The value must contain a document with the attributes to patch (the patch document). All attributes from the patch document will be added to the existing document if they do not yet exist, and overwritten in the existing document if they do exist there. - -**Arguments** - -- **id**: `String` - - The id or key of the document - -- **value**: `T` - - A representation of a single document - -- **options**: `DocumentUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given document is ignored. If this is set to false, then the \_rev attribute given in the body document is taken as a precondition. The document is only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject myObj = ... 
-DocumentEntity info = template.update("some-id", myObj, new DocumentReplaceOptions()); -``` - -## ArangoOperations.delete - -``` -ArangoOperations.delete(String id, Class entityClass, DocumentDeleteOptions options) : DocumentEntity -``` - -Deletes the document with the given _id_ from a collection. - -**Arguments** - -- **id**: `String` - - The id or key of the document - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentDeleteOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -template.delete("some-id", MyObject.class, new DocumentDeleteOptions()); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Template/MultiDocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Template/MultiDocumentManipulation.md deleted file mode 100644 index c67ed706aa8a..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Template/MultiDocumentManipulation.md +++ /dev/null @@ -1,255 +0,0 @@ - -# Manipulating multiple documents - -## ArangoOperations.find - -``` -ArangoOperations.find(Iterable ids, Class entityClass) : Iterable -``` - -Retrieves multiple documents with the given _ids_ from a collection. - -**Arguments** - -- **ids**: `Iterable` - - The ids or keys of the documents - -- **entityClass**: `Class` - - The entity type of the documents - -**Examples** - -```Java -@Autowired ArangoOperations template; - -Iterable docs = template.find(Arrays.asList("some-id", "some-other-id"), MyObject.class); -``` - -## ArangoOperations.findAll - -``` -ArangoOperations.findAll(Class entityClass) : Iterable -``` - -Retrieves all documents from a collection. - -**Arguments** - -- **entityClass**: `Class` - - The entity class which represents the collection - -**Examples** - -```Java -@Autowired ArangoOperations template; - -Iterable docs = template.find(MyObject.class); -``` - -## ArangoOperations.insert - -``` -ArangoOperations.insert(Iterable values, Class entityClass, DocumentCreateOptions options) : MultiDocumentEntity -``` - -Creates new documents from the given documents, unless there is already a document with the \_key given. If no \_key is given, a new unique \_key is generated automatically. - -**Arguments** - -- **values**: `Iterable` - - A List of documents - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentCreateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **overwrite**: `Boolean` - - If set to true, the insert becomes a replace-insert. 
If a document with the same \_key already exists the new document is not rejected with unique constraint violated but will replace the old document. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject obj1 = ... -MyObject obj2 = ... -MyObject obj3 = ... -template.insert(Arrays.asList(obj1, obj2, obj3)); -``` - -## ArangoOperations.replace - -``` -ArangoOperations.replace(Iterable values, Class entityClass, DocumentReplaceOptions options) : MultiDocumentEntity -``` - -Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are specified by the \_key attributes in the documents in values. - -**Arguments** - -- **values**: `Iterable` - - A List of documents - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentReplaceOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given document is ignored. If this is set to false, then the \_rev attribute given in the body document is taken as a precondition. The document is only replaced if the current revision is the one specified. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject obj1 = ... -MyObject obj2 = ... -MyObject obj3 = ... -template.replace(Arrays.asList(obj1, obj2, obj3), new DocumentReplaceOptions()); -``` - -## ArangoOperations.update - -``` -ArangoOperations.update(Iterable values, Class entityClass, DocumentUpdateOptions options) : MultiDocumentEntity -``` - -Partially updates documents, the documents to update are specified by the \_key attributes in the objects on values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All attributes from the patch documents will be added to the existing documents if they do not yet exist, and overwritten in the existing documents if they do exist there. - -**Arguments** - -- **values**: `Iterable` - - A List of documents - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentUpdateOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ignoreRevs**: `Boolean` - - By default, or if this is set to true, the \_rev attributes in the given document is ignored. If this is set to false, then the \_rev attribute given in the body document is taken as a precondition. The document is only replaced if the current revision is the one specified. 
- - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnNew**: `Boolean` - - Return additionally the complete new document under the attribute new in the result. - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -MyObject obj1 = ... -MyObject obj2 = ... -MyObject obj3 = ... -template.update(Arrays.asList(obj1, obj2, obj3), new DocumentUpdateOptions()); -``` - -## ArangoOperations.delete - -``` -ArangoOperations.delete(Iterable values, Class entityClass, DocumentDeleteOptions options) : MultiDocumentEntity -``` - -Deletes multiple documents from a collection. - -**Arguments** - -- **values**: `Iterable` - - The keys of the documents or the documents themselves - -- **entityClass**: `Class` - - The entity class which represents the collection - -- **options**: `DocumentDeleteOptions` - - - **waitForSync**: `Boolean` - - Wait until document has been synced to disk. - - - **ifMatch**: `String` - - Replace a document based on target revision - - - **returnOld**: `Boolean` - - Additionally return the complete old document under the attribute old in the result. Only available if the _overwrite_ option is used. - - - **silent**: `Boolean` - - If set to true, an empty object will be returned as response. No meta-data will be returned for the created document. This option can be used to save some network traffic. - -**Examples** - -```Java -@Autowired ArangoOperations template; - -collection.delete(Arrays.asList("some-id", "some-other-id"), MyObject.class, new DocumentDeleteOptions()); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Template/Queries.md b/Documentation/Books/Drivers/SpringData/Reference/Template/Queries.md deleted file mode 100644 index fcc3230031a6..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Template/Queries.md +++ /dev/null @@ -1,103 +0,0 @@ - -## ArangoOperations.query - -``` -ArangoOperations.query(String query, Map bindVars, AqlQueryOptions options, Class entityClass) : ArangoCursor -``` - -Performs a database query using the given _query_ and _bindVars_, then returns a new _ArangoCursor_ instance for the result list. - -**Arguments** - -- **query**: `String` - - An AQL query string - -- **bindVars**: `Map` - - key/value pairs defining the variables to bind the query to - -- **options**: `AqlQueryOptions` - - - **count**: `Boolean` - - Indicates whether the number of documents in the result set should be returned in the "count" attribute of the result. Calculating the "count" attribute might have a performance impact for some queries in the future so this option is turned off by default, and "count" is only returned when requested. - - - **ttl**: `Integer` - - The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used. - - - **batchSize**: `Integer` - - Maximum number of result documents to be transferred from the server to the client in one roundtrip. 
If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed. - - - **memoryLimit**: `Long` - - The maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the query will fail with error "resource limit exceeded" in case it allocates too much memory. A value of 0 indicates that there is no memory limit. - - - **cache**: `Boolean` - - Flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup will be skipped for the query. If set to true, it will lead to the query cache being checked for the query if the query cache mode is either on or demand. - - - **failOnWarning**: `Boolean` - - When set to true, the query will throw an exception and abort instead of producing a warning. This option should be used during development to catch potential issues early. When the attribute is set to false, warnings will not be propagated to exceptions and will be returned with the query result. There is also a server configuration option --query.fail-on-warning for setting the default value for failOnWarning so it does not need to be set on a per-query level. - - - **profile**: `Boolean` - - If set to true, then the additional query profiling information will be returned in the sub-attribute profile of the extra return attribute if the query result is not served from the query cache. - - - **maxTransactionSize**: `Long` - - Transaction size limit in bytes. Honored by the RocksDB storage engine only. - - - **maxWarningCount**: `Long` - - Limits the maximum number of warnings a query will return. The number of warnings a query will return is limited to 10 by default, but that number can be increased or decreased by setting this attribute. - - - **intermediateCommitCount**: `Long` - - Maximum number of operations after which an intermediate commit is performed automatically. Honored by the RocksDB storage engine only. - - - **intermediateCommitSize**: `Long` - - Maximum total size of operations after which an intermediate commit is performed automatically. Honored by the RocksDB storage engine only. - - - **satelliteSyncWait**: `Double` - - This enterprise parameter allows to configure how long a DBServer will have time to bring the satellite collections involved in the query into sync. The default value is 60.0 (seconds). When the max time has been reached the query will be stopped. - - - **skipInaccessibleCollections** - - AQL queries (especially graph traversals) will treat collection to which a user has no access rights as if these collections were empty. Instead of returning a forbidden access error, your queries will execute normally. This is intended to help with certain use-cases: A graph contains several collections and different users execute AQL queries on that graph. You can now naturally limit the accessible results by changing the access rights of users on collections. This feature is only available in the Enterprise Edition. - - - **fullCount**: `Boolean` - - If set to true and the query contains a LIMIT clause, then the result will have an extra attribute with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The fullCount attribute will contain the number of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. 
Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and thus make queries run longer. Note that the fullCount attribute will only be present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the query. - - - **maxPlans**: `Integer` - - Limits the maximum number of plans that are created by the AQL query optimizer. - - - **rules**: `Collection` - - A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to enable a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules. - - - **stream**: `Boolean` - - Specify true and the query will be executed in a streaming fashion. The query result is not stored on the server, but calculated on the fly. Beware: long-running queries will need to hold the collection locks for as long as the query cursor exists. When set to false a query will be executed right away in its entirety. In that case query results are either returned right away (if the resultset is small enough), or stored on the arangod instance and accessible via the cursor API (with respect to the ttl). It is advisable to only use this option on short-running queries or without exclusive locks (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not work on streaming queries. Additionally query statistics, warnings and profiling data will only be available after the query is finished. The default value is false. - -- **type**: `Class` - - The type of the result (POJO class, `VPackSlice` or `Collection`/`List`/`Map`) - -**Examples** - -```Java -@Autowired ArangoOperations template; - -ArangoCursor cursor = template.query("FOR i IN @@collection RETURN i" - new MapBuilder().put("@collection", MyObject.class).get(), - new AqlQueryOptions(), - MyObject.class); -``` diff --git a/Documentation/Books/Drivers/SpringData/Reference/Template/README.md b/Documentation/Books/Drivers/SpringData/Reference/Template/README.md deleted file mode 100644 index 6d24ed1f74c2..000000000000 --- a/Documentation/Books/Drivers/SpringData/Reference/Template/README.md +++ /dev/null @@ -1,5 +0,0 @@ - -# Template - -With `ArangoTemplate` Spring Data ArangoDB offers a central support for interactions with the database over a rich feature set. It mostly offers the features from the ArangoDB Java driver with additional exception translation from the drivers exceptions to the Spring Data access exceptions inheriting the `DataAccessException` class. -The `ArangoTemplate` class is the default implementation of the operations interface `ArangoOperations` which developers of Spring Data are encouraged to code against. 
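
Taken together, the template operations covered in this chapter can be combined roughly as follows. This is a sketch in the style of the examples above; `MyObject` is a hypothetical mapped entity and the overloads used are the ones listed in the reference sections above.

```Java
@Autowired ArangoOperations template;

// insert a couple of (hypothetical) entities in one call
MyObject obj1 = new MyObject();
MyObject obj2 = new MyObject();
template.insert(Arrays.asList(obj1, obj2), MyObject.class, new DocumentCreateOptions());

// read them back by key
Iterable docs = template.find(Arrays.asList("some-id", "some-other-id"), MyObject.class);

// or run an AQL query against the backing collection through the same template
ArangoCursor cursor = template.query(
    "FOR doc IN @@collection RETURN doc",
    new MapBuilder().put("@collection", MyObject.class).get(),
    new AqlQueryOptions(),
    MyObject.class);
```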
diff --git a/Documentation/Books/Drivers/book.json b/Documentation/Books/Drivers/book.json deleted file mode 100644 index 780003be2daa..000000000000 --- a/Documentation/Books/Drivers/book.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "gitbook": "^3.2.2", - "title": "ArangoDB VERSION_NUMBER Drivers Documentation", - "version": "VERSION_NUMBER", - "author": "ArangoDB GmbH", - "description": "Aggregated documentation of the official drivers for ArangoDB - the native multi-model NoSQL database", - "language": "en", - "plugins": [ - "-search", - "-lunr", - "-sharing", - "toggle-chapters", - "addcssjs", - "anchorjs", - "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git", - "ga", - "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git", - "edit-link", - "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git", - "localized-footer" - ], - "pdf": { - "fontSize": 12, - "toc": true, - "margin": { - "right": 60, - "left": 60, - "top": 35, - "bottom": 35 - } - }, - "styles": { - "website": "styles/website.css" - }, - "pluginsConfig": { - "addcssjs": { - "js": ["styles/header.js", "styles/hs.js"], - "css": ["styles/header.css"] - }, - "sitemap-general": { - "prefix": "https://docs.arangodb.com/devel/Drivers/", - "changefreq": "@GCHANGE_FREQ@", - "priority": @GPRIORITY@ - }, - "ga": { - "token": "UA-81053435-2" - }, - "edit-link": { - "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/Drivers", - "label": "Edit Page" - }, - "localized-footer": { - "filename": "FOOTER.html" - } - } -} diff --git a/Documentation/Books/Drivers/styles/header.css b/Documentation/Books/Drivers/styles/header.css deleted file mode 100644 index 4ec87c77b0e5..000000000000 --- a/Documentation/Books/Drivers/styles/header.css +++ /dev/null @@ -1,305 +0,0 @@ -/* Design fix because of the header */ -@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700); - -body { - overflow: hidden; - font-family: Roboto, Helvetica, sans-serif; - background: #444444; -} - -.book .book-header h1 a, .book .book-header h1 a:hover { - display: none; -} - -/* GOOGLE START */ - -.google-search #gsc-iw-id1{ - border: none !important; -} - -.google-search .gsst_b { - position: relative; - top: 10px; - left: -25px; - width: 1px; -} - -.gsst_a .gscb_a { - color: #c01a07 !important; -} - -.google-search input { - background-color: #fff !important; - font-family: Roboto, Helvetica, sans-serif; - font-size: 10pt !important; - padding-left: 5px !important; - float: right; - position: relative; - top: 8px; - width: 100% !important; - height: 30px !important; -} - -.google-search input:active { -} - -.google-search { - margin-right: 10px; - margin-left: 10px !important; - float: right !important; -} - -.google-search td, -.google-search table, -.google-search tr, -.google-search th { - background-color: #444444 !important; -} - -.google-search .gsc-input-box, -.google-search .gsc-input-box input { - border-radius: 3px !important; - width: 200px; -} - -.gsc-branding-text, -.gsc-branding-img, -.gsc-user-defined-text { - display: none !important; -} - -.google-search .gsc-input-box input { - font-size: 16px !important; -} - -.google-search .gsc-search-button { - display: none !important; -} - -.google-search .gsc-control-cse { - padding: 10px !important; -} - -.google-search > div { - float: left !important; - width: 200px !important; -} - -/* GOOGLE END */ - -.book-summary, -.book-body { - margin-top: 48px; -} - -.arangodb-logo, .arangodb-logo-small { - display: 
inline; - float: left; - padding-top: 12px; - margin-left: 10px; -} - -.arangodb-logo img { - height: 23px; -} - -.arangodb-logo-small { - display: none; -} - -.arangodb-version-switcher { - width: 65px; - height: 44px; - margin-left: 16px; - float: left; - display: inline; - font-weight: bold; - color: #fff; - background-color: inherit; - border: 0; -} - -.arangodb-version-switcher option { - background-color: white; - color: black; -} - - -.arangodb-header { - position: fixed; - width: 100%; - height: 48px; - z-index: 1; -} - -.arangodb-header .socialIcons-googlegroups a img { - position: relative; - height: 14px; - top: 3px; -} - -.arangodb-navmenu { - display: block; - float: right; - margin: 0; - padding: 0; -} - -.arangodb-navmenu li { - display: block; - float: left; -} - -.arangodb-navmenu li a { - display: block; - float: left; - padding: 0 10px; - line-height: 48px; - font-size: 16px; - font-weight: 400; - color: #fff; - text-decoration: none; - font-family: Roboto, Helvetica, sans-serif; -} - -.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover { - background-color: #88A049 !important; -} - -.downloadIcon { - margin-right: 10px; -} - -/** simple responsive updates **/ - -@media screen and (max-width: 1000px) { - .arangodb-navmenu li a { - padding: 0 6px; - } - - .arangodb-logo { - margin-left: 10px; - } - - .google-search { - margin-right: 5px !important; - } - - .downloadIcon { - margin-right: 0; - } - - .socialIcons { - display: none !important; - } -} - - -@media screen and (max-width: 800px) { - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 130px !important; - } - - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-logo { - display: none; - } - - .arangodb-logo-small { - display: inline; - margin-left: 10px; - } - - .arangodb-logo-small img { - height: 20px; - } - - .arangodb-version-switcher { - margin: 0; - } - -} - -@media screen and (max-width: 600px) { - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-version-switcher, - .downloadIcon { - display: none !important; - } - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 24px !important; - } - - .google-search .gsc-input-box input[style] { - background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important; - } - - .google-search .gsc-input-box input:focus { - width: 200px !important; - position: relative; - left: -176px; - background-position: -9999px -9999px !important; - } - -} - -@media screen and (max-width: 400px) { - .arangodb-navmenu li a { - font-size: 13px; - padding: 0 5px; - } - .google-search { - display: none; - } -} - -/*Hubspot Cookie notice */ - -body div#hs-eu-cookie-confirmation { - bottom: 0; - top: auto; - position: fixed; - text-align: center !important; -} - -body div#hs-eu-cookie-confirmation.can-use-gradients { - background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75)); -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner { - display: inline-block; - padding: 15px 18px 0; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area { - float: left; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button { - background-color: #577138 !important; - border: none !important; - text-shadow: none !important; - 
box-shadow: none; - padding: 5px 15px !important; - margin-left: 10px; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p { - float: left; - color: #000 !important; - text-shadow: none; -} diff --git a/Documentation/Books/Drivers/styles/header.js b/Documentation/Books/Drivers/styles/header.js deleted file mode 100644 index 9466cd6ff7a6..000000000000 --- a/Documentation/Books/Drivers/styles/header.js +++ /dev/null @@ -1,161 +0,0 @@ -// Try to set the version number early, jQuery not available yet -var searcheable_versions = [@BROWSEABLE_VERSIONS@]; -var cx = '@GSEARCH_ID@'; - -document.addEventListener("DOMContentLoaded", function(event) { - if (!gitbook.state.root) return; - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = document.getElementsByClassName("arangodb-version-switcher")[0]; - if (bookVersion) { - switcher.value = bookVersion[1]; - } else { - switcher.style.display = "none"; - } -}); - -window.onload = function(){ -window.localStorage.removeItem(":keyword"); - -$(document).ready(function() { - -function appendHeader() { - var VERSION_SELECTOR = "" - var i = 0; - var prefix; - for (i = 0; i < searcheable_versions.length; i++ ) { - if (searcheable_versions[i] === 'devel') { - prefix = ''; - } else { - prefix = 'v'; - } - VERSION_SELECTOR += '\n'; - } - - var div = document.createElement('div'); - div.innerHTML = '
\n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n'; - - $('.book').before(div.innerHTML); - - }; - - - function rerenderNavbar() { - $('#header').remove(); - appendHeader(); - }; - - //render header - rerenderNavbar(); - function addGoogleSrc() { - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//cse.google.com/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - }; - addGoogleSrc(); - - $(".arangodb-navmenu a[data-book]").on("click", function(e) { - e.preventDefault(); - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - urlSplit.pop(); // e.g. "Manual" - window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html"; - }); - - // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*) - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = $(".arangodb-version-switcher"); - if (bookVersion) { - switcher.val(bookVersion[1]); - } else { - switcher.hide(); - } - - $(".arangodb-version-switcher").on("change", function(e) { - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - var currentBook = urlSplit.pop(); // e.g. "Manual" - urlSplit.pop() // e.g. "3.0" - if (e.target.value == "2.8") { - var legacyMap = { - "Manual": "", - "AQL": "/Aql", - "HTTP": "/HttpApi", - "Cookbook": "/Cookbook" - }; - currentBook = legacyMap[currentBook]; - } else { - currentBook = "/" + currentBook; - } - window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html"; - }); - -}); - -}; diff --git a/Documentation/Books/Drivers/styles/hs.js b/Documentation/Books/Drivers/styles/hs.js deleted file mode 100644 index 9a8ae18a61d2..000000000000 --- a/Documentation/Books/Drivers/styles/hs.js +++ /dev/null @@ -1,33 +0,0 @@ -// HubSpot Script Loader. Please do not block this resource. 
See more: http://hubs.ly/H0702_H0 - -(function (id, src, attrs) { - if (document.getElementById(id)) { - try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); } - finally { return; } - } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } } - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"}); - -(function (id, src) { - if (document.getElementById(id)) { return; } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js'); - -window.setTimeout(function () { - $('body').on('click', 'a', function () { - var _hsq = window._hsq = window._hsq || []; - _hsq.push(['setPath', window.location.pathname]); - _hsq.push(['trackPageView']); - }); -}, 1000); diff --git a/Documentation/Books/Drivers/styles/website.css b/Documentation/Books/Drivers/styles/website.css deleted file mode 100644 index 0bbc2f1eff37..000000000000 --- a/Documentation/Books/Drivers/styles/website.css +++ /dev/null @@ -1,84 +0,0 @@ -.markdown-section small { - font-size: 80%; -} -.markdown-section sub, .markdown-section sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -.markdown-section sup { - top: -.5em; -} -.markdown-section sub { - bottom: -.25em; -} - -div.example_show_button { - border: medium solid lightgray; - text-align: center; - position: relative; - top: -10px; - display: flex; - justify-content: center; -} - -.book .book-body .navigation.navigation-next { - right: 10px !important; -} - -.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover { - color: #fff !important; - background: #80A54D !important; - text-decoration: none; -} - -.book .book-body .page-wrapper .page-inner section.normal .deprecated{ - background-color: rgba(240,240,0,0.4); -} - -.book .book-body section > ul li:last-child { - margin-bottom: 0.85em; -} - -.book .book-body .alert p:last-child { - margin-bottom: 0; -} - -.columns-3 { - -webkit-column-count: 3; - -moz-column-count: 3; - -ms-column-count: 3; - -o-column-count: 3; - column-count: 3; - columns: 3; -} - -.localized-footer { - opacity: 0.5; -} - -.example-container { - position: relative; -} - -.example-container a.anchorjs-link { - position: absolute; - top: 10px; - right: 10px; - font: 1em/1 anchorjs-icons; -} - -.gsib_a { -padding: 0px !important; -} - -.gsc-control-cse { -border: 0px !important; -background-color: transparent !important; -} - - -.gsc-input { -margin: 0px !important; -} diff --git a/Documentation/Books/GNUmakefile b/Documentation/Books/GNUmakefile deleted file mode 100644 index d3cb01fc52c8..000000000000 --- a/Documentation/Books/GNUmakefile +++ /dev/null @@ -1,14 +0,0 @@ -# Wrapper makefile to maintain compatibility with the old syntax - don't use. 
- -build-books: - ./build.sh build-books - -build-dist-books: - ./build.sh build-dist-books --outputDir "$(OUTPUT_DIR)" --nodeModulesDir "$(NODE_MODULES_DIR)" --cookBook "$(COOKBOOK_DIR)" - -clean: - ./build.sh clean - -all: build-books - -.PHONY: build-books diff --git a/Documentation/Books/HTTP/.gitkeep b/Documentation/Books/HTTP/.gitkeep new file mode 100644 index 000000000000..936ca3adc4e3 --- /dev/null +++ b/Documentation/Books/HTTP/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the directory is kept. + +Some of the old documentation building scripts are still +used by the new system which copy files into this folder. \ No newline at end of file diff --git a/Documentation/Books/HTTP/AdministrationAndMonitoring/README.md b/Documentation/Books/HTTP/AdministrationAndMonitoring/README.md deleted file mode 100644 index b481e62f6fc8..000000000000 --- a/Documentation/Books/HTTP/AdministrationAndMonitoring/README.md +++ /dev/null @@ -1,56 +0,0 @@ -HTTP Interface for Administration and Monitoring -================================================ - -This is an introduction to ArangoDB's HTTP interface for administration and -monitoring of the server. - -Logs ----- - - - -@startDocuBlock get_admin_log - -@startDocuBlock get_admin_loglevel - -@startDocuBlock put_admin_loglevel - -Statistics ----------- - - - -@startDocuBlock get_admin_statistics - - - -@startDocuBlock get_admin_statistics_description - -Cluster -------- - - - -@startDocuBlock get_admin_server_mode - -@startDocuBlock put_admin_server_mode - -@startDocuBlock get_admin_server_id - -@startDocuBlock get_admin_server_role - -@startDocuBlock get_admin_server_availability - - - -@startDocuBlock get_cluster_statistics - -@startDocuBlock get_cluster_health - - -Other ------ - - - -@startDocuBlock get_admin_routing_reloads diff --git a/Documentation/Books/HTTP/Agency/README.md b/Documentation/Books/HTTP/Agency/README.md deleted file mode 100644 index 08123020e56e..000000000000 --- a/Documentation/Books/HTTP/Agency/README.md +++ /dev/null @@ -1,401 +0,0 @@ -HTTP Interface for Agency feature -================================= - -The Agency is the ArangoDB component which manages the entire ArangoDB cluster. -ArangoDB itself mainly uses the Agency as a central place to store the configuration -and the cluster nodes health management. It implements the Raft consensus protocol to act as -the single-source of truth for the entire cluster. You may know other software providing similar functionality e.g. _Apache Zookeeper_, _etcd_ or _Consul_. - -To an end-user the Agency is essentially a fault-tolerant Key-Value Store with a simple REST-API. -It is possible to use the Agency API for a variety of use-cases, for example: - -- Centralized configuration repository -- Service discovery registry -- Distributed synchronization service -- Distributed Lock-Manager - -*Note 1*: To access the Agency API with authentication enabled, you need to include an authorization header -with every request. The authorization header _must_ contain a *superuser JWT Token*; For more information see the [authentication section](../General/README.md#authentication). - -*Note 2*: The key-prefix `/arango` contains ArangoDBs internal configuration. You should _never_ change any values below the _arango_ key. - -### Key-Value store APIs - -Generally, all document IO to and from the key-value store consists of JSON arrays. The outer array is an envelope for multiple read or write transactions. 
The results are arrays are an envelope around the results corresponding to the order of the incoming transactions. - -Consider the following write operation into a pristine agency: - - -``` -curl -L http://$SERVER:$PORT/_api/agency/write -d '[[{"a":{"op":"set","new":{"b":{"c":[1,2,3]},"e":12}},"d":{"op":"set","new":false}}]]' -``` -```js -[{results:[1]}] - -``` - -And the subsequent read operation - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/"]]' -``` -```js -[ - { - "a": { - "b": { - "c": [1,2,3] - }, - "e": 12 - }, - "d": false - } -] -``` - -In the first step we committed a single transaction that commits the JSON document inside the inner transaction array to the agency. The result is `[1]`, which is the replicated log index. Repeated invocation will yield growing log numbers 2, 3, 4, etc. - -The read access is a complete access to the key-value store indicated by access to its root element and returns the result as an array corresponding to the outermost array in the read transaction. - -Let's dig in some deeper. - -### Read API - -Let's start with the above initialized key-value store in the following. Let us visit the following read operations: - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b"]]' -``` -```js -[ - { - "a": { - "b": { - "c": [1,2,3] - } - } - } -] -``` - -And - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/c"]]' -``` -```js -[ - { - "a": { - "b": { - "c": [1,2,3] - } - } - } -] -``` - -Note that the above results are identical, meaning that results obtained from the agency are always return with full path. - -The second outer array brackets in read operations correspond to transactions, meaning that the result is guaranteed to have been acquired without a write transaction in between: - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/e"],["/d","/a/b"]]' -``` -```js -[ - { - "a": { - "e": 12 - } - }, - { - "a": { - "b": { - "c": [1,2,3 - ] - } - }, - "d": false - } -] -``` - -While the first transaction consists of a single read access to the key-value-store thus stretching the meaning of the word transaction, the second bracket actually hold two disjunct read accesses, which have been joined within zero-time, i.e. without a write access in between. That is to say that `"/d"` cannot have changed before `"/a/b"` had been acquired. - -Let's try to fetch a value from the key-value-store, which does not exist: - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/d"]]' -``` -```js -[ - { - "a": { - "b": {} - } - } -] -``` - -The result returns the cross section of the requested path and the key-value-store contents. `"/a/b"` exists, but there is no key `"/a/b/d"`. Thus the following transaction will yield: - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/d","/d"]]' -``` -```js -[ - { - "a": { - "b": {} - }, - "d": false - } -] -``` - -And this last read operation should return: - -``` -curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/c"],["/a/b/d"],["/a/x/y"],["/y"],["/a/b","/a/x" ]]' -``` -```js -[ - {"a":{"b":{"c":[1,2,3]}}}, - {"a":{"b":{}}}, - {"a":{}}, - {}, - {"a":{"b":{"c":[1,2,3]}}} -] - -``` - -### Write API - -The write API must obviously be more versatile and needs a more detailed appreciation. Write operations are arrays of transactions with preconditions, i.e. `[[U,P]]`, where the system tries to apply all updates in the outer array in turn, rejecting those whose precondition is not fulfilled by the current state. 
It is guaranteed that the transactions in the write request are sequenced adjacent to each other (with no intervention from other write requests). Only the ones with failed preconditions are left out. - -For `P`, the value of a key is an object with attributes `"old"`, `"oldNot"`, `"oldEmpty"` or `"isArray"`. With `"old"` one can specify a JSON value that has to be present for the condition to be fulfilled. With `"oldNot"` one may check for a value to not be equal to the test. While with `"oldEmpty"`, which can take a boolean value, one can specify that the key value needs to be not set `true` or set to an arbitrary value `false`. With `"isArray"` one can specify that the value must be an array. As a shortcut, `"old"` values of scalar or array type may be stored directly in the attribute. -Examples: - -```js -{ "/a/b/c": { "old": [1,2,3] }} -``` - -is a precondition specifying that the previous value of the key `"/a/b/c"` key must be `[1,2,3]`. If and only if the value of the precondition is not an object we provide a notation, where the keywork `old` may be omitted. Thus, the above check may be shortcut as - -```js -{ "/a/b/c": [1, 2, 3] } -``` - -Consider the agency in initialized as above let's review the responses from the agency as follows: - -``` -curl -L http://$SERVER:$PORT/_api/agency/write -d '[[{"/a/b/c":{"op":"set","new":[1,2,3,4]},"/a/b/pi":{"op":"set","new":"some text"}},{"/a/b/c":{"old":[1,2,3]}}]]' -``` -```js -{ - "results": [19] -} -``` - -The condition is fulfilled in the first run and would be wrong in a second returning - -```js -{ - "results": [0] -} -``` - -`0` as a result means that the precondition failed and no "real" log number was returned. - -```js -{ "/a/e": { "oldEmpty": false } } -``` - -means that the value of the key `"a/e"` must be set (to something, which can be `null`!). The condition - -```js -{ "/a/e": { "oldEmpty": true } } -``` - -means that the value of the key `"a/e"` must be unset. The condition - -``` -{ "/a/b/c": { "isArray": true } } -``` - -means that the value of the key `"a/b/c"` must be an array. - -The update value U is an object, the attribute names are again key strings and the values are objects with optional attributes `"new"`, `"op"` and `"ttl"`. They have the following meaning: - -`"op"` determines the operation, possible values are `"set"` (the default, if left out), `"delete"`, `"increment"`, `"decrement"`, `"push"`, `"pop"`, `"shift"` or `"prepend"` - -`"new"` is the new value, can be omitted for the `"delete"` operation and for `"increment"` and `"decrement"`, where `1` is implied - -`"ttl"`, if present, the new value that is being set gets a time to live in seconds, given by a numeric value in this attribute. It is only guaranteed that the actual removal of the value is done according to the system clock, so up to clock skew between servers. The removal is done by an additional write transaction that is automatically generated between the regular writes. - -Additional rule: If none of `"new"` and `"op"` is set or the value is not even an object, then this is to be interpreted as if it were -```js -{ "op": "set", "new": } -``` -which amounts to setting the value with no precondition. - -Examples: - -```js -{ "/a": { "op": "set", "new": 12 } } -``` - -sets the value of the key `"/a"` to `12`. The same could have been achieved by - -```js -{ "/a": 12 } -``` - -or by - -```js -{ "/a": { "new": 12} } -``` - -The operation - -```js -{ "/a/b": { "new": { "c": [1,2,3,4] } } } -``` - -sets the key `"/a/b"` to `{"c": [1,2,3,4]}`. 
Note that in the above example this is the same as setting the value of `"/a/b/c"` to `[1,2,3,4]`. The difference is, that if `a/b` had other sub attributes, then this transaction would delete all these other attributes and make `"/a/b"` equal to `{"c": [1,2,3,4]}`, whereas setting `"/a/b/c"` to `[1,2,3,4]` would retain all attributes other than `"c"` in `"/a/b"`. - -Here are some more examples for full transactions (update/precondition pairs). The transaction - -```js -[ { "/a/b": { "new": { "c": [1,2,3,4] } } }, - { "/a/b": { "old": { "c": [1,2,3] } } } ] -``` - -sets the key `"/a/b"` to `{"c":[1,2,3,4]}` if and only if it was `{"c":[1,2,3]}` before. Note that this fails if `"/a/b"` had other attributes than `"c"`. The transaction - -```js -[ { "/x": { "op": "delete" } }, - { "/x": { "old": false } } ] -``` - -clears the value of the key `"/x"` if this old value was false. - -```js -[ { "/y": { "new": 13 }, - { "/y": { "oldEmpty": true } } } -``` - -sets the value of `"/y"` to `13`, but only, if it was unset before. - -```js -[ { "/z": { "op": "push", "new": "Max" } } ] -``` - -appends the string `"Max"` to the end of the list stored in the `"z"` attribute, or creates an array `["Max"]` in `"z"` if it was unset or not an array. - -```js -[ { "/u": { "op": "pop" } } ] -``` -removes the last entry of the array stored under `"u"`, if the value of `"u"` is not set or not an array. - -### HTTP-headers for write operations - -`X-ArangoDB-Agency-Mode` with possible values `"waitForCommitted"`, `"waitForSequenced"` and `"noWait"`. - -In the first case the write operation only returns when the commit to the replicated log has actually happened. In the second case the write operation returns when the write transactions that fulfilled their preconditions have been sequenced and thus it is known, which of the write transactions in the given array had fulfilled preconditions. In both cases the body is a JSON array containing the indexes of the transactions in the list that had fulfilled preconditions. - -In the last case, `"noWait"`, the operation returns immediately, an empty body is returned. To get any information about the result of the operation one has to specify a tag (see below) and ask about the status later on. - -`X-ArangoDB-Agency-Tag` with an arbitrary UTF-8 string value. - -### Observers - -External services to the agency may announce themselves or others to be observers of arbitrary existing or future keys in the key-value-store. The agency must then inform the observing service of any changes to the subtree below the observed key. The notification is done by virtue of POST requests to a required valid URL. - -In order to observe any future modification below say `"/a/b/c"`, a observer is announced through posting the below document to the agency’s write REST handler: - -```js -[ { "/a/b/c": - { "op": "observe", - "url": "http://:/" - } - } ] -``` - -The observer is notified of any changes to that target until such time that it removes itself as an observer of that key through - -```js -[ { "/a/b/c": - { "op": "unobserve", - "url": “http://:/" } } ] -``` - -Note that the last document removes all observations from entities below `"/a/b/c"`. In particular, issuing - -```js -[ { "/": "unobserve", "url": "http://:/"} ] -``` - -will result in the removal of all observations for URL `"http://:/"`. 
-The notifying POST requests are submitted immediately with any complete array of changes to the read db of the leader of create, modify and delete events accordingly; The body -```js -{ "term": "5", - "index": 167, - "/": { - "/a/b/c" : { "op": "modify", "old": 1, "new": 2 } }, - "/constants/euler" : {"op": "create", "new": 2.718281828459046 }, - "/constants/pi": { "op": "delete" } } } -``` - -### Configuration - -At all times, i.e. regardless of the state of the agents and the current health of the RAFT consensus, one can invoke the configuration API: - - curl http://$SERVER:$PORT/_api/agency/config - -Here, and in all subsequent calls, we assume that `$SERVER` is -replaced by the server name and `$PORT` is replaced by the port -number. We use `curl` throughout for the examples, but any client -library performing HTTP requests should do. -The output might look somewhat like this - -```js -{ - "term": 1, - "leaderId": "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98", - "lastCommitted": 1, - "lastAcked": { - "ac129027-b440-4c4f-84e9-75c042942171": 0.21, - "c54dbb8a-723d-4c82-98de-8c841a14a112": 0.21, - "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98": 0 - }, - "configuration": { - "pool": { - "ac129027-b440-4c4f-84e9-75c042942171": "tcp://localhost:8531", - "c54dbb8a-723d-4c82-98de-8c841a14a112": "tcp://localhost:8530", - "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98": "tcp://localhost:8529" - }, - "active": [ - "ac129027-b440-4c4f-84e9-75c042942171", - "c54dbb8a-723d-4c82-98de-8c841a14a112", - "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98" - ], - "id": "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98", - "agency size": 3, - "pool size": 3, - "endpoint": "tcp://localhost:8529", - "min ping": 0.5, - "max ping": 2.5, - "supervision": false, - "supervision frequency": 5, - "compaction step size": 1000, - "supervision grace period": 120 - } -} -``` - -This is the actual output of a healthy agency. The configuration of the agency is found in the `configuration` section as you might have guessed. It is populated by static information on the startup parameters like `agency size`, the once generated `unique id` etc. It holds information on the invariants of the RAFT algorithm and data compaction. - -The remaining data reflect the variant entities in RAFT, as `term` and `leaderId`, also some debug information on how long the last leadership vote was received from any particular agency member. Low term numbers on a healthy network are an indication of good operation environment, while often increasing term numbers indicate, that the network environment and stability suggest to raise the RAFT parameters `min ping` and 'max ping' accordingly. diff --git a/Documentation/Books/HTTP/Analyzers/README.md b/Documentation/Books/HTTP/Analyzers/README.md deleted file mode 100644 index 79bf2cb4330f..000000000000 --- a/Documentation/Books/HTTP/Analyzers/README.md +++ /dev/null @@ -1,91 +0,0 @@ -HTTP Interface for Analyzers -============================ - -The REST API is accessible via the `/_api/analyzer` endpoint URL callable via -HTTP requests. - -Analyzer Operations -------------------- - -@startDocuBlock post_api_analyzer - -@startDocuBlock get_api_analyzer - -@startDocuBlock get_api_analyzers - -@startDocuBlock delete_api_analyzer - -Analyzer Types --------------- - -The currently implemented Analyzer types are: - -- `identity` -- `delimited` -- `ngram` -- `text` - -### Identity - -An analyzer applying the `identity` transformation, i.e. returning the input -unmodified. - -The value of the *properties* attribute is ignored. 
- -### Delimited - -An analyzer capable of breaking up delimited text into tokens as per RFC4180 -(without starting new records on newlines). - -The *properties* allowed for this analyzer are either: - -- a string encoded delimiter to use -- an object with the attribute *delimiter* containing the string encoded - delimiter to use - -### N-gram - -An analyzer capable of producing n-grams from a specified input in a range of -[min;max] (inclusive). Can optionally preserve the original input. - -The *properties* allowed for this analyzer are an object with the following -attributes: - -- *max*: unsigned integer (required) maximum n-gram length -- *min*: unsigned integer (required) minimum n-gram length -- *preserveOriginal*: boolean (required) output the original value as well - -*Example* - -With `min` = 4 and `max` = 5, the analyzer will produce the following n-grams -for the input *foobar*: -- foob -- ooba -- obar -- fooba -- oobar - -With `preserveOriginal` enabled, it will additionally include *foobar* itself. - -### Text - -An analyzer capable of breaking up strings into individual words while also -optionally filtering out stop-words, applying case conversion and extracting -word stems. - -The *properties* allowed for this analyzer are an object with the following -attributes: - -- `locale`: string (required) format: (language[_COUNTRY][.encoding][@variant]) -- `case_convert`: string enum (optional) one of: `lower`, `none`, `upper`, - default: `lower` -- `ignored_words`: array of strings (optional) words to omit from result, - default: load words from `ignored_words_path` -- `ignored_words_path`: string(optional) path with the `language` sub-directory - containing files with words to omit, default: if no - `ignored_words` provided then the value from the - environment variable `IRESEARCH_TEXT_STOPWORD_PATH` or - if undefined then the current working directory -- `no_accent`: boolean (optional) apply accent removal, default: true -- `no_stem`: boolean (optional) do not apply stemming on returned words, - default: false diff --git a/Documentation/Books/HTTP/Api/README.md b/Documentation/Books/HTTP/Api/README.md deleted file mode 100644 index ebf00685fd34..000000000000 --- a/Documentation/Books/HTTP/Api/README.md +++ /dev/null @@ -1,10 +0,0 @@ -HTTP Interface -============== - -Following you have ArangoDB's HTTP Interface for Documents, Databases, Edges and more. - -There are also some examples provided for every API action. - -You may also use the interactive [Swagger documentation](http://swagger.io) in the -[ArangoDB webinterface](../../Manual/Programs/WebInterface/index.html) -to explore the API calls below. \ No newline at end of file diff --git a/Documentation/Books/HTTP/AqlQuery/README.md b/Documentation/Books/HTTP/AqlQuery/README.md deleted file mode 100644 index 8a19e427afd5..000000000000 --- a/Documentation/Books/HTTP/AqlQuery/README.md +++ /dev/null @@ -1,49 +0,0 @@ -HTTP Interface for AQL Queries -============================== - -### Explaining and parsing queries - -ArangoDB has an HTTP interface to syntactically validate AQL queries. -Furthermore, it offers an HTTP interface to retrieve the execution plan for any -valid AQL query. - -Both functionalities do not actually execute the supplied AQL query, but only -inspect it and return meta information about it. 
- - - -@startDocuBlock post_api_explain - -@startDocuBlock PostApiQueryProperties - -### Query tracking - -ArangoDB has an HTTP interface for retrieving the lists of currently -executing AQL queries and the list of slow AQL queries. In order to make meaningful -use of these APIs, query tracking needs to be enabled in the database the HTTP -request is executed for. - - -@startDocuBlock GetApiQueryProperties - - -@startDocuBlock PutApiQueryProperties - - -@startDocuBlock GetApiQueryCurrent - - -@startDocuBlock GetApiQuerySlow - - -@startDocuBlock DeleteApiQuerySlow - -### Killing queries - -Running AQL queries can also be killed on the server. ArangoDB provides a kill facility -via an HTTP interface. To kill a running query, its id (as returned for the query in the -list of currently running queries) must be specified. The kill flag of the query will -then be set, and the query will be aborted as soon as it reaches a cancelation point. - - -@startDocuBlock DeleteApiQueryKill diff --git a/Documentation/Books/HTTP/AqlQueryCache/README.md b/Documentation/Books/HTTP/AqlQueryCache/README.md deleted file mode 100644 index 94bd3c261d66..000000000000 --- a/Documentation/Books/HTTP/AqlQueryCache/README.md +++ /dev/null @@ -1,13 +0,0 @@ -HTTP Interface for the AQL query results cache -============================================== - -This section describes the API methods for controlling the AQL query results cache. - -@startDocuBlock GetApiQueryCacheCurrent - -@startDocuBlock DeleteApiQueryCache - -@startDocuBlock GetApiQueryCacheProperties - -@startDocuBlock PutApiQueryCacheProperties - diff --git a/Documentation/Books/HTTP/AqlQueryCursor/AccessingCursors.md b/Documentation/Books/HTTP/AqlQueryCursor/AccessingCursors.md deleted file mode 100644 index 419dcb2c9945..000000000000 --- a/Documentation/Books/HTTP/AqlQueryCursor/AccessingCursors.md +++ /dev/null @@ -1,11 +0,0 @@ -Accessing Cursors via HTTP -========================== - - -@startDocuBlock post_api_cursor - - -@startDocuBlock post_api_cursor_identifier - - -@startDocuBlock post_api_cursor_delete diff --git a/Documentation/Books/HTTP/AqlQueryCursor/QueryResults.md b/Documentation/Books/HTTP/AqlQueryCursor/QueryResults.md deleted file mode 100644 index 3a0c2bc5cbc8..000000000000 --- a/Documentation/Books/HTTP/AqlQueryCursor/QueryResults.md +++ /dev/null @@ -1,232 +0,0 @@ -Retrieving query results -======================== - -Select queries are executed on-the-fly on the server and the result -set will be returned back to the client. - -There are two ways the client can get the result set from the server: - -* In a single roundtrip -* Using a cursor - -Single roundtrip ----------------- - -The server will only transfer a certain number of result documents back to the -client in one roundtrip. This number is controllable by the client by setting -the *batchSize* attribute when issuing the query. - -If the complete result can be transferred to the client in one go, the client -does not need to issue any further request. The client can check whether it has -retrieved the complete result set by checking the *hasMore* attribute of the -result set. If it is set to *false*, then the client has fetched the complete -result set from the server. In this case no server side cursor will be created. 
- -```js -> curl --data @- -X POST --dump - http://localhost:8529/_api/cursor -{ "query" : "FOR u IN users LIMIT 2 RETURN u", "count" : true, "batchSize" : 2 } - -HTTP/1.1 201 Created -Content-type: application/json - -{ - "hasMore" : false, - "error" : false, - "result" : [ - { - "name" : "user1", - "_rev" : "210304551", - "_key" : "210304551", - "_id" : "users/210304551" - }, - { - "name" : "user2", - "_rev" : "210304552", - "_key" : "210304552", - "_id" : "users/210304552" - } - ], - "code" : 201, - "count" : 2 -} -``` - -Using a cursor --------------- - -If the result set contains more documents than should be transferred in a single -roundtrip (i.e. as set via the *batchSize* attribute), the server will return -the first few documents and create a temporary cursor. The cursor identifier -will also be returned to the client. The server will put the cursor identifier -in the *id* attribute of the response object. Furthermore, the *hasMore* -attribute of the response object will be set to *true*. This is an indication -for the client that there are additional results to fetch from the server. - -*Examples*: - -Create and extract first batch: - -```js -> curl --data @- -X POST --dump - http://localhost:8529/_api/cursor -{ "query" : "FOR u IN users LIMIT 5 RETURN u", "count" : true, "batchSize" : 2 } - -HTTP/1.1 201 Created -Content-type: application/json - -{ - "hasMore" : true, - "error" : false, - "id" : "26011191", - "result" : [ - { - "name" : "user1", - "_rev" : "258801191", - "_key" : "258801191", - "_id" : "users/258801191" - }, - { - "name" : "user2", - "_rev" : "258801192", - "_key" : "258801192", - "_id" : "users/258801192" - } - ], - "code" : 201, - "count" : 5 -} -``` - -Extract next batch, still have more: - -```js -> curl -X PUT --dump - http://localhost:8529/_api/cursor/26011191 - -HTTP/1.1 200 OK -Content-type: application/json - -{ - "hasMore" : true, - "error" : false, - "id" : "26011191", - "result": [ - { - "name" : "user3", - "_rev" : "258801193", - "_key" : "258801193", - "_id" : "users/258801193" - }, - { - "name" : "user4", - "_rev" : "258801194", - "_key" : "258801194", - "_id" : "users/258801194" - } - ], - "code" : 200, - "count" : 5 -} -``` - -Extract next batch, done: - -```js -> curl -X PUT --dump - http://localhost:8529/_api/cursor/26011191 - -HTTP/1.1 200 OK -Content-type: application/json - -{ - "hasMore" : false, - "error" : false, - "result" : [ - { - "name" : "user5", - "_rev" : "258801195", - "_key" : "258801195", - "_id" : "users/258801195" - } - ], - "code" : 200, - "count" : 5 -} -``` - -Do not do this because *hasMore* now has a value of false: - -```js -> curl -X PUT --dump - http://localhost:8529/_api/cursor/26011191 - -HTTP/1.1 404 Not Found -Content-type: application/json - -{ - "errorNum": 1600, - "errorMessage": "cursor not found: disposed or unknown cursor", - "error": true, - "code": 404 -} -``` - -Modifying documents -------------------- - -The `_api/cursor` endpoint can also be used to execute modifying queries. - -The following example appends a value into the array `arrayValue` of the document -with key `test` in the collection `documents`. Normal update behavior is to -replace the attribute completely, and using an update AQL query with the `PUSH()` -function allows to append to the array. 
- -```js -curl --data @- -X POST --dump http://127.0.0.1:8529/_api/cursor -{ "query": "FOR doc IN documents FILTER doc._key == @myKey UPDATE doc._key WITH { arrayValue: PUSH(doc.arrayValue, @value) } IN documents","bindVars": { "myKey": "test", "value": 42 } } - -HTTP/1.1 201 Created -Content-type: application/json; charset=utf-8 - -{ - "result" : [], - "hasMore" : false, - "extra" : { - "stats" : { - "writesExecuted" : 1, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 1, - "filtered" : 0 - }, - "warnings" : [] - }, - "error" : false, - "code" : 201 -} -``` - -Setting a memory limit ----------------------- - -To set a memory limit for the query, the *memoryLimit* option can be passed to -the server. -The memory limit specifies the maximum number of bytes that the query is -allowed to use. When a single AQL query reaches the specified limit value, -the query will be aborted with a *resource limit exceeded* exception. In a -cluster, the memory accounting is done per shard, so the limit value is -effectively a memory limit per query per shard. - -```js -> curl --data @- -X POST --dump - http://localhost:8529/_api/cursor -{ "query" : "FOR i IN 1..100000 SORT i RETURN i", "memoryLimit" : 100000 } - -HTTP/1.1 500 Internal Server Error -Server: ArangoDB -Connection: Keep-Alive -Content-Type: application/json; charset=utf-8 -Content-Length: 115 - -{"error":true,"errorMessage":"query would use more memory than allowed (while executing)","code":500,"errorNum":32} -``` - -If no memory limit is specified, then the server default value (controlled by -startup option *--query.memory-limit* will be used for restricting the maximum amount -of memory the query can use. A memory limit value of *0* means that the maximum -amount of memory for the query is not restricted. diff --git a/Documentation/Books/HTTP/AqlQueryCursor/README.md b/Documentation/Books/HTTP/AqlQueryCursor/README.md deleted file mode 100644 index cff8bf800477..000000000000 --- a/Documentation/Books/HTTP/AqlQueryCursor/README.md +++ /dev/null @@ -1,13 +0,0 @@ -HTTP Interface for AQL Query Cursors -==================================== - -This is an introduction to ArangoDB's HTTP Interface for Queries. Results of AQL -and simple queries are returned as cursors in order to batch the communication -between server and client. Each call returns a number of documents in a batch -and an indication if the current batch has been the final batch. Depending on -the query, the total number of documents in the result set might or might not be -known in advance. In order to free server resources the client should delete the -cursor as soon as it is no longer needed. - -To execute a query, the query details need to be shipped from the client to -the server via an HTTP POST request. diff --git a/Documentation/Books/HTTP/AqlUserFunctions/README.md b/Documentation/Books/HTTP/AqlUserFunctions/README.md deleted file mode 100644 index 7b2a5f6dd9a1..000000000000 --- a/Documentation/Books/HTTP/AqlUserFunctions/README.md +++ /dev/null @@ -1,27 +0,0 @@ -HTTP Interface for AQL User Functions Management -================================================ - -AQL User Functions Management ------------------------------ -This is an introduction to ArangoDB's HTTP interface for managing AQL -user functions. AQL user functions are a means to extend the functionality -of ArangoDB's query language (AQL) with user-defined JavaScript code. 
- -For an overview of how AQL user functions and their implications, please refer to -the [Extending AQL](../../AQL/Extending/index.html) chapter. - -The HTTP interface provides an API for adding, deleting, and listing -previously registered AQL user functions. - -All user functions managed through this interface will be stored in the -system collection *_aqlfunctions*. Documents in this collection should not -be accessed directly, but only via the dedicated interfaces. - - -@startDocuBlock post_api_aqlfunction - - -@startDocuBlock delete_api_aqlfunction - - -@startDocuBlock get_api_aqlfunction diff --git a/Documentation/Books/HTTP/AsyncResultsManagement/README.md b/Documentation/Books/HTTP/AsyncResultsManagement/README.md deleted file mode 100644 index 40b922692e2b..000000000000 --- a/Documentation/Books/HTTP/AsyncResultsManagement/README.md +++ /dev/null @@ -1,130 +0,0 @@ -HTTP Interface for Async Results Management -=========================================== - -Request Execution ------------------ - -ArangoDB provides various methods of executing client requests. Clients can choose the appropriate method on a per-request level based on their throughput, control flow, and durability requirements. - -### Blocking execution - -ArangoDB is a multi-threaded server, allowing the processing of multiple client -requests at the same time. Communication handling and the actual work can be performed -by multiple worker threads in parallel. - -Though multiple clients can connect and send their requests in parallel to ArangoDB, -clients may need to wait for their requests to be processed. - -By default, the server will fully process an incoming request and then return the -result to the client. The client must wait for the server's response before it can -send additional requests over the connection. For clients that are single-threaded -or not event-driven, waiting for the full server response may be non-optimal. - -Furthermore, please note that even if the client closes the HTTP -connection, the request running on the server will still continue until -it is complete and only then notice that the client no longer listens. -Thus closing the connection does not help to abort a long running query! -See below under [Async Execution and later Result Retrieval](#async-execution-and-later-result-retrieval) -and [HttpJobPutCancel](#managing-async-results-via-http) for details. - -### Fire and Forget - -To mitigate client blocking issues, ArangoDB since version 1.4. offers a generic mechanism -for non-blocking requests: if clients add the HTTP header *x-arango-async: true* to their -requests, ArangoDB will put the request into an in-memory task queue and return an HTTP 202 -(accepted) response to the client instantly. The server will execute the tasks from -the queue asynchronously, decoupling the client requests and the actual work. - -This allows for much higher throughput than if clients would wait for the server's -response. The downside is that the response that is sent to the client is always the -same (a generic HTTP 202) and clients cannot make a decision based on the actual -operation's result at this point. In fact, the operation might have not even been executed at the -time the generic response has reached the client. Clients can thus not rely on their -requests having been processed successfully. - -The asynchronous task queue on the server is not persisted, meaning not-yet processed -tasks from the queue will be lost in case of a crash. 
However, the client will -not know whether they were processed or not. - -Clients should thus not send the extra header when they have strict durability -requirements or if they rely on result of the sent operation for further actions. - -The maximum number of queued tasks is determined by the startup option -*--server.maximal-queue-size*. If more than this number of tasks are already queued, -the server will reject the request with an HTTP 500 error. - -Finally, please note that it is not possible to cancel such a -fire and forget job, since you won't get any handle to identify it later on. -If you need to cancel requests, -use [Async Execution and later Result Retrieval](#async-execution-and-later-result-retrieval) -and [HttpJobPutCancel](#managing-async-results-via-http) below. - -### Async Execution and later Result Retrieval - -By adding the HTTP header *x-arango-async: store* to a request, clients can instruct -the ArangoDB server to execute the operation asynchronously as [above](#fire-and-forget), -but also store the operation result in memory for a later retrieval. The -server will return a job id in the HTTP response header *x-arango-async-id*. The client -can use this id in conjunction with the HTTP API at */_api/job*, which is described in -detail in this manual. - -Clients can ask the ArangoDB server via the async jobs API which results are -ready for retrieval, and which are not. Clients can also use the async jobs API to -retrieve the original results of an already executed async job by passing it the -originally returned job id. The server will then return the job result as if the job was -executed normally. Furthermore, clients can cancel running async jobs by -their job id, see [HttpJobPutCancel](#managing-async-results-via-http). - -ArangoDB will keep all results of jobs initiated with the *x-arango-async: store* -header. Results are removed from the server only if a client explicitly asks the -server for a specific result. - -The async jobs API also provides methods for garbage collection that clients can -use to get rid of "old" not fetched results. Clients should call this method periodically -because ArangoDB does not artificially limit the number of not-yet-fetched results. - -It is thus a client responsibility to store only as many results as needed and to fetch -available results as soon as possible, or at least to clean up not fetched results -from time to time. - -The job queue and the results are kept in memory only on the server, so they will be -lost in case of a crash. - -### Canceling asynchronous jobs - -As mentioned above it is possible to cancel an asynchronously running -job using its job ID. This is done with a PUT request as described in -[HttpJobPutCancel](#managing-async-results-via-http). - -However, a few words of explanation about what happens behind the -scenes are in order. Firstly, a running async query can internally be -executed by C++ code or by JavaScript code. For example CRUD operations -are executed directly in C++, whereas AQL queries and transactions -are executed by JavaScript code. The job cancelation only works for -JavaScript code, since the mechanism used is simply to trigger an -uncatchable exception in the JavaScript thread, which will be caught -on the C++ level, which in turn leads to the cancelation of the job. -No result can be retrieved later, since all data about the request is -discarded. 
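To make this concrete, here is a hedged sketch of storing an async job and then canceling it via the */_api/job* API; the job id *12345* is purely illustrative, the real id is returned in the *x-arango-async-id* response header:

```js
> curl -X POST --header "x-arango-async: store" --data @- --dump - http://localhost:8529/_api/cursor
{ "query" : "FOR i IN 1..100000000 RETURN i" }

HTTP/1.1 202 Accepted
x-arango-async-id: 12345

> curl -X PUT --dump - http://localhost:8529/_api/job/12345/cancel
```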
- -If you cancel a job running on a coordinator of a cluster (Sharding), -then only the code running on the coordinator is stopped, there may -remain tasks within the cluster which have already been distributed to -the DBservers and it is currently not possible to cancel them as well. - -### Async Execution and Authentication - -If a request requires authentication, the authentication procedure is run before -queueing. The request will only be queued if it valid credentials and the authentication -succeeds. If the request does not contain valid credentials, it will not be queued but -rejected instantly in the same way as a "regular", non-queued request. - -Managing Async Results via HTTP -------------------------------- - -@startDocuBlock job_fetch_result -@startDocuBlock job_cancel -@startDocuBlock job_delete -@startDocuBlock job_getStatusById -@startDocuBlock job_getByType - diff --git a/Documentation/Books/HTTP/BatchRequest/README.md b/Documentation/Books/HTTP/BatchRequest/README.md deleted file mode 100644 index 8f66783f7e9a..000000000000 --- a/Documentation/Books/HTTP/BatchRequest/README.md +++ /dev/null @@ -1,212 +0,0 @@ -HTTP Interface for Batch Requests -================================= - -Clients normally send individual operations to ArangoDB in individual -HTTP requests. This is straightforward and simple, but has the -disadvantage that the network overhead can be significant if many -small requests are issued in a row. - -To mitigate this problem, ArangoDB offers a batch request API that -clients can use to send multiple operations in one batch to -ArangoDB. This method is especially useful when the client has to send -many HTTP requests with a small body/payload and the individual -request results do not depend on each other. - -Clients can use ArangoDB's batch API by issuing a multipart HTTP POST -request to the URL */_api/batch* handler. The handler will accept the -request if the Content-type is *multipart/form-data* and a boundary -string is specified. ArangoDB will then decompose the batch request -into its individual parts using this boundary. This also means that -the boundary string itself must not be contained in any of the parts. -When ArangoDB has split the multipart request into its individual -parts, it will process all parts sequentially as if it were a -standalone request. When all parts are processed, ArangoDB will -generate a multipart HTTP response that contains one part for each -part operation result. For example, if you send a multipart request -with 5 parts, ArangoDB will send back a multipart response with 5 -parts as well. - -The server expects each part message to start with exactly the -following "header": - - Content-type: application/x-arango-batchpart - -You can optionally specify a *Content-Id* "header" to uniquely -identify each part message. The server will return the *Content-Id* in -its response if it is specified. Otherwise, the server will not send a -Content-Id "header" back. The server will not validate the uniqueness -of the Content-Id. After the mandatory *Content-type* and the -optional *Content-Id* header, two Windows line breaks -(i.e. *\r\n\r\n*) must follow. Any deviation of this structure -might lead to the part being rejected or incorrectly interpreted. The -part request payload, formatted as a regular HTTP request, must follow -the two Windows line breaks literal directly. 
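In other words, a single part consists of the part header(s), a blank line, and an embedded HTTP request. A hedged sketch of the raw bytes of one part, with the Windows line breaks written out explicitly (boundary and payload are illustrative), looks like this:

```
--XXXsubpartXXX\r\n
Content-type: application/x-arango-batchpart\r\n
Content-Id: 1\r\n
\r\n
POST /_api/document?collection=xyz HTTP/1.1\r\n
\r\n
{"a":1}\r\n
```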
- -Note that the literal *Content-type: application/x-arango-batchpart* -technically is the header of the MIME part, and the HTTP request -(including its headers) is the body part of the MIME part. - -An actual part request should start with the HTTP method, the called -URL, and the HTTP protocol version as usual, followed by arbitrary -HTTP headers. Its body should follow after the usual *\r\n\r\n* -literal. Part requests are therefore regular HTTP requests, only -embedded inside a multipart message. - -The following example will send a batch with 3 individual document -creation operations. The boundary used in this example is -*XXXsubpartXXX*. - -*Examples* - -```js -> curl -X POST --data-binary @- --header "Content-type: multipart/form-data; boundary=XXXsubpartXXX" http://localhost:8529/_api/batch ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 1 - -POST /_api/document?collection=xyz HTTP/1.1 - -{"a":1,"b":2,"c":3} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 2 - -POST /_api/document?collection=xyz HTTP/1.1 - -{"a":1,"b":2,"c":3,"d":4} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 3 - -POST /_api/document?collection=xyz HTTP/1.1 - -{"a":1,"b":2,"c":3,"d":4,"e":5} ---XXXsubpartXXX-- -``` - -The server will then respond with one multipart message, containing -the overall status and the individual results for the part -operations. The overall status should be 200 except there was an error -while inspecting and processing the multipart message. The overall -status therefore does not indicate the success of each part operation, -but only indicates whether the multipart message could be handled -successfully. - -Each part operation will return its own status value. As the part -operation results are regular HTTP responses (just included in one -multipart response), the part operation status is returned as a HTTP -status code. The status codes of the part operations are exactly the -same as if you called the individual operations standalone. Each part -operation might also return arbitrary HTTP headers and a body/payload: - -*Examples* - -```js -HTTP/1.1 200 OK -Connection: Keep-Alive -Content-type: multipart/form-data; boundary=XXXsubpartXXX -Content-length: 1055 - ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 1 - -HTTP/1.1 202 Accepted -Content-type: application/json; charset=utf-8 -Etag: "9514299" -Content-length: 53 - -{"error":false,"_id":"xyz/9514299","_key":"9514299","_rev":"9514299"} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 2 - -HTTP/1.1 202 Accepted -Content-type: application/json; charset=utf-8 -Etag: "9579835" -Content-length: 53 - -{"error":false,"_id":"xyz/9579835","_key":"9579835","_rev":"9579835"} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart -Content-Id: 3 - -HTTP/1.1 202 Accepted -Content-type: application/json; charset=utf-8 -Etag: "9645371" -Content-length: 53 - -{"error":false,"_id":"xyz/9645371","_key":"9645371","_rev":"9645371"} ---XXXsubpartXXX-- -``` - -In the above example, the server returned an overall status code of -200, and each part response contains its own status value (202 in the -example): - -When constructing the multipart HTTP response, the server will use the -same boundary that the client supplied. 
If any of the part responses -has a status code of 400 or greater, the server will also return an -HTTP header *x-arango-errors* containing the overall number of part -requests that produced errors: - -*Examples* - -```js -> curl -X POST --data-binary @- --header "Content-type: multipart/form-data; boundary=XXXsubpartXXX" http://localhost:8529/_api/batch ---XXXsubpartXXX -Content-type: application/x-arango-batchpart - -POST /_api/document?collection=nonexisting - -{"a":1,"b":2,"c":3} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart - -POST /_api/document?collection=xyz - -{"a":1,"b":2,"c":3,"d":4} ---XXXsubpartXXX-- -``` - -In this example, the overall response code is 200, but as some of the -part request failed (with status code 404), the *x-arango-errors* -header of the overall response is *1*: - -*Examples* - -```js -HTTP/1.1 200 OK -x-arango-errors: 1 -Content-type: multipart/form-data; boundary=XXXsubpartXXX -Content-length: 711 - ---XXXsubpartXXX -Content-type: application/x-arango-batchpart - -HTTP/1.1 404 Not Found -Content-type: application/json; charset=utf-8 -Content-length: 111 - -{"error":true,"code":404,"errorNum":1203,"errorMessage":"collection \/_api\/collection\/nonexisting not found"} ---XXXsubpartXXX -Content-type: application/x-arango-batchpart - -HTTP/1.1 202 Accepted -Content-type: application/json; charset=utf-8 -Etag: "9841979" -Content-length: 53 - -{"error":false,"_id":"xyz/9841979","_key":"9841979","_rev":"9841979"} ---XXXsubpartXXX-- -``` - -Please note that the database used for all part operations of a batch -request is determined by scanning the original URL (the URL that contains -*/_api/batch*). It is not possible to override the -[database name](../../Manual/Appendix/Glossary.html#database-name) in -part operations of a batch. When doing so, any other database name used -in a batch part will be ignored. - - -@startDocuBlock batch_processing diff --git a/Documentation/Books/HTTP/BulkImports/ImportingHeadersAndValues.md b/Documentation/Books/HTTP/BulkImports/ImportingHeadersAndValues.md deleted file mode 100644 index c241f5087a2f..000000000000 --- a/Documentation/Books/HTTP/BulkImports/ImportingHeadersAndValues.md +++ /dev/null @@ -1,44 +0,0 @@ -Importing Headers and Values -============================ - -When using this type of import, the attribute names of the documents to be -imported are specified separate from the actual document value data. The first -line of the HTTP POST request body must be a JSON array containing the attribute -names for the documents that follow. The following lines are interpreted as the -document data. Each document must be a JSON array of values. No attribute names -are needed or allowed in this data section. - -*Examples* - -```js -curl --data-binary @- -X POST --dump - "http://localhost:8529/_api/import?collection=test" -[ "firstName", "lastName", "age", "gender" ] -[ "Joe", "Public", 42, "male" ] -[ "Jane", "Doe", 31, "female" ] - -HTTP/1.1 201 Created -Server: ArangoDB -Connection: Keep-Alive -Content-type: application/json; charset=utf-8 - -{"error":false,"created":2,"empty":0,"errors":0} -``` - -The server will again respond with an HTTP 201 if everything went well. The -number of documents imported will be returned in the *created* attribute of the -response. If any documents were skipped or incorrectly formatted, this will be -returned in the *errors* attribute. The number of empty lines in the input file -will be returned in the *empty* attribute. 
- -If the *details* parameter was set to *true* in the request, the response will -also contain an attribute *details* which is an array of details about errors that -occurred on the server side during the import. This array might be empty if no -errors occurred. - -Importing into Edge Collections -------------------------------- - -Please note that when importing documents into an -[edge collection](../../Manual/Appendix/Glossary.html#edge-collection), -it is mandatory that all imported documents contain the *_from* and *_to* attributes, -and that these contain references to existing collections. diff --git a/Documentation/Books/HTTP/BulkImports/ImportingSelfContained.md b/Documentation/Books/HTTP/BulkImports/ImportingSelfContained.md deleted file mode 100644 index 36335e7c5caf..000000000000 --- a/Documentation/Books/HTTP/BulkImports/ImportingSelfContained.md +++ /dev/null @@ -1,69 +0,0 @@ -Importing Self-Contained JSON Documents -======================================= - -This import method allows uploading self-contained JSON documents. The documents -must be uploaded in the body of the HTTP POST request. Each line of the body -will be interpreted as one stand-alone document. Empty lines in the body are -allowed but will be skipped. Using this format, the documents are imported -line-wise. - -Example input data: - { "_key": "key1", ... } - { "_key": "key2", ... } - ... - -To use this method, the *type* query parameter should be set to *documents*. - -It is also possible to upload self-contained JSON documents that are embedded -into a JSON array. Each element from the array will be treated as a document and -be imported. - -Example input data for this case: - -```js -[ - { "_key": "key1", ... }, - { "_key": "key2", ... }, - ... -] -``` - -This format does not require each document to be on a separate line, and any -whitespace in the JSON data is allowed. It can be used to import a -JSON-formatted result array (e.g. from arangosh) back into ArangoDB. Using this -format requires ArangoDB to parse the complete array and keep it in memory for -the duration of the import. This might be more resource-intensive than the -line-wise processing. - -To use this method, the *type* query parameter should be set to *array*. - -Setting the *type* query parameter to *auto* will make the server auto-detect whether -the data are line-wise JSON documents (type = documents) or a JSON array (type = array). - -*Examples* - -```js -curl --data-binary @- -X POST --dump - "http://localhost:8529/_api/import?type=documents&collection=test" -{ "name" : "test", "gender" : "male", "age" : 39 } -{ "type" : "bird", "name" : "robin" } - -HTTP/1.1 201 Created -Server: ArangoDB -Connection: Keep-Alive -Content-type: application/json; charset=utf-8 - -{"error":false,"created":2,"empty":0,"errors":0} -``` - -The server will respond with an HTTP 201 if everything went well. The number of -documents imported will be returned in the *created* attribute of the -response. If any documents were skipped or incorrectly formatted, this will be -returned in the *errors* attribute. There will also be an attribute *empty* in -the response, which will contain a value of *0*. - -If the *details* parameter was set to *true* in the request, the response will -also contain an attribute *details* which is an array of details about errors that -occurred on the server side during the import. This array might be empty if no -errors occurred. 
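The *array* import type described above works analogously. A hedged sketch (assuming the target collection *test* exists; the *details* array may be empty when nothing failed):

```js
curl --data-binary @- -X POST --dump - "http://localhost:8529/_api/import?type=array&collection=test&details=true"
[
  { "name" : "test", "gender" : "male", "age" : 39 },
  { "type" : "bird", "name" : "robin" }
]

HTTP/1.1 201 Created
Server: ArangoDB
Connection: Keep-Alive
Content-type: application/json; charset=utf-8

{"error":false,"created":2,"empty":0,"errors":0,"details":[]}
```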
- - diff --git a/Documentation/Books/HTTP/BulkImports/README.md b/Documentation/Books/HTTP/BulkImports/README.md deleted file mode 100644 index 9a04f31fe31b..000000000000 --- a/Documentation/Books/HTTP/BulkImports/README.md +++ /dev/null @@ -1,46 +0,0 @@ -HTTP Interface for Bulk Imports -=============================== - -ArangoDB provides an HTTP interface to import multiple documents at once into a -collection. This is known as a bulk import. - -The data uploaded must be provided in JSON format. There are two mechanisms to -import the data: - -* self-contained JSON documents: in this case, each document contains all - attribute names and values. Attribute names may be completely different - among the documents uploaded -* attribute names plus document data: in this case, the first array must - contain the attribute names of the documents that follow. The following arrays - containing only the attribute values. Attribute values will be mapped to the - attribute names by positions. - -The endpoint address is */_api/import* for both input mechanisms. Data must be -sent to this URL using an HTTP POST request. The data to import must be -contained in the body of the POST request. - -The *collection* query parameter must be used to specify the target collection for -the import. Importing data into a non-existing collection will produce an error. - -The *waitForSync* query parameter can be set to *true* to make the import only -return if all documents have been synced to disk. - -The *complete* query parameter can be set to *true* to make the entire import fail if -any of the uploaded documents is invalid and cannot be imported. In this case, -no documents will be imported by the import run, even if a failure happens at the -end of the import. - -If *complete* has a value other than *true*, valid documents will be imported while -invalid documents will be rejected, meaning only some of the uploaded documents -might have been imported. - -The *details* query parameter can be set to *true* to make the import API return -details about documents that could not be imported. If *details* is *true*, then -the result will also contain a *details* attribute which is an array of detailed -error messages. If the *details* is set to *false* or omitted, no details will be -returned. - - - -@startDocuBlock import_document -@startDocuBlock import_json diff --git a/Documentation/Books/HTTP/Cluster/Health.md b/Documentation/Books/HTTP/Cluster/Health.md deleted file mode 100644 index 93b9ac8e1b08..000000000000 --- a/Documentation/Books/HTTP/Cluster/Health.md +++ /dev/null @@ -1,3 +0,0 @@ - - -@startDocuBlock get_cluster_health \ No newline at end of file diff --git a/Documentation/Books/HTTP/Cluster/Maintenance.md b/Documentation/Books/HTTP/Cluster/Maintenance.md deleted file mode 100644 index 648f722deeb8..000000000000 --- a/Documentation/Books/HTTP/Cluster/Maintenance.md +++ /dev/null @@ -1,3 +0,0 @@ - - -@startDocuBlock put_cluster_maintenance \ No newline at end of file diff --git a/Documentation/Books/HTTP/Cluster/README.md b/Documentation/Books/HTTP/Cluster/README.md deleted file mode 100644 index a953f9e3caa1..000000000000 --- a/Documentation/Books/HTTP/Cluster/README.md +++ /dev/null @@ -1,14 +0,0 @@ -HTTP Interface for Cluster -========================== - -This _Chapter_ describes the REST API of the ArangoDB Cluster. 
- -* [Server ID](ServerId.md) -* [Server Role](ServerRole.md) -* [Cluster Statistics](Statistics.md) -* [Cluster Health](Health.md) -* [Cluster Maintenance](Maintenance.md) -* [Agency](../Agency/README.md) - -How to repair a cluster with broken `distributeShardsLike` collections is -described in the [Repairs](../Repairs/README.md) chapter. diff --git a/Documentation/Books/HTTP/Cluster/ServerId.md b/Documentation/Books/HTTP/Cluster/ServerId.md deleted file mode 100644 index c0326183de53..000000000000 --- a/Documentation/Books/HTTP/Cluster/ServerId.md +++ /dev/null @@ -1,3 +0,0 @@ - - -@startDocuBlock get_admin_server_id \ No newline at end of file diff --git a/Documentation/Books/HTTP/Cluster/ServerRole.md b/Documentation/Books/HTTP/Cluster/ServerRole.md deleted file mode 100644 index bc74621c67b9..000000000000 --- a/Documentation/Books/HTTP/Cluster/ServerRole.md +++ /dev/null @@ -1,3 +0,0 @@ - - -@startDocuBlock get_admin_server_role \ No newline at end of file diff --git a/Documentation/Books/HTTP/Cluster/Statistics.md b/Documentation/Books/HTTP/Cluster/Statistics.md deleted file mode 100644 index 456e2a3961e1..000000000000 --- a/Documentation/Books/HTTP/Cluster/Statistics.md +++ /dev/null @@ -1,3 +0,0 @@ - - -@startDocuBlock get_cluster_statistics diff --git a/Documentation/Books/HTTP/Collection/Creating.md b/Documentation/Books/HTTP/Collection/Creating.md deleted file mode 100644 index 28ea805399a3..000000000000 --- a/Documentation/Books/HTTP/Collection/Creating.md +++ /dev/null @@ -1,12 +0,0 @@ -Creating and Deleting Collections -================================= - - - -@startDocuBlock post_api_collection - - -@startDocuBlock delete_api_collection - - -@startDocuBlock put_api_collection_truncate diff --git a/Documentation/Books/HTTP/Collection/Getting.md b/Documentation/Books/HTTP/Collection/Getting.md deleted file mode 100644 index 8bb4929a8b84..000000000000 --- a/Documentation/Books/HTTP/Collection/Getting.md +++ /dev/null @@ -1,25 +0,0 @@ -Getting Information about a Collection -====================================== - - -@startDocuBlock get_api_collection_name - - -@startDocuBlock get_api_collection_properties - - -@startDocuBlock get_api_collection_count - - -@startDocuBlock get_api_collection_figures - -@startDocuBlock get_api_collection_getResponsibleShard - - -@startDocuBlock get_api_collection_revision - - -@startDocuBlock get_api_collection_checksum - - -@startDocuBlock get_api_collections diff --git a/Documentation/Books/HTTP/Collection/Modifying.md b/Documentation/Books/HTTP/Collection/Modifying.md deleted file mode 100644 index 6e37ecf8d4fc..000000000000 --- a/Documentation/Books/HTTP/Collection/Modifying.md +++ /dev/null @@ -1,23 +0,0 @@ -Modifying a Collection -====================== - - -@startDocuBlock put_api_collection_load - - -@startDocuBlock put_api_collection_unload - - -@startDocuBlock put_api_collection_load_indexes_into_memory - - -@startDocuBlock put_api_collection_properties - - -@startDocuBlock put_api_collection_rename - - -@startDocuBlock put_api_collection_rotate - - -@startDocuBlock put_api_collection_recalculate_count diff --git a/Documentation/Books/HTTP/Collection/README.md b/Documentation/Books/HTTP/Collection/README.md deleted file mode 100644 index 956835360f06..000000000000 --- a/Documentation/Books/HTTP/Collection/README.md +++ /dev/null @@ -1,92 +0,0 @@ -HTTP Interface for Collections -============================== - -This is an introduction to ArangoDB's HTTP interface for collections. 
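For orientation, a minimal round trip through this interface might look as follows; this is a hedged sketch and the collection name *demo* is illustrative:

```js
> curl -X POST --data '{ "name" : "demo" }' --dump - http://localhost:8529/_api/collection

> curl --dump - http://localhost:8529/_api/collection/demo/properties

> curl -X DELETE --dump - http://localhost:8529/_api/collection/demo
```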
- - Collections - ----------- - -A collection consists of documents. It is uniquely identified by its -[collection identifier](../../Manual/Appendix/Glossary.html#collection-identifier). -It also has a unique name that clients should -use to identify and access it. Collections can be renamed. This will -change the collection name, but not the collection identifier. -Collections have a type that is specified by the user when the collection -is created. There are currently two types: document and edge. The default -type is document. - -Collection Identifier ---------------------- - -A collection identifier lets you refer to a collection in a database. -It is a string value and is unique within the database. Up to including -ArangoDB 1.1, the collection identifier has been a client's primary -means to access collections. Starting with ArangoDB 1.2, clients should -instead use a collection's unique name to access a collection instead of -its identifier. -ArangoDB currently uses 64bit unsigned integer values to maintain -collection ids internally. When returning collection ids to clients, -ArangoDB will put them into a string to ensure the collection id is not -clipped by clients that do not support big integers. Clients should treat -the collection ids returned by ArangoDB as opaque strings when they store -or use them locally. - -Note: collection ids have been returned as integers up to including ArangoDB 1.1 - -Collection Name ---------------- - -A collection name identifies a collection in a database. It is a string -and is unique within the database. Unlike the collection identifier it is -supplied by the creator of the collection. The collection name must consist -of letters, digits, and the _ (underscore) and - (dash) characters only. -Please refer to Naming Conventions in ArangoDB for more information on valid -collection names. - -Key Generator -------------- - -ArangoDB allows using key generators for each collection. Key generators -have the purpose of auto-generating values for the _key attribute of a document -if none was specified by the user. By default, ArangoDB will use the traditional -key generator. The traditional key generator will auto-generate key values that -are strings with ever-increasing numbers. The increment values it uses are -non-deterministic. - -Contrary, the auto increment key generator will auto-generate deterministic key -values. Both the start value and the increment value can be defined when the -collection is created. The default start value is 0 and the default increment -is 1, meaning the key values it will create by default are: - -1, 2, 3, 4, 5, ... - -When creating a collection with the auto increment key generator and an increment of 5, the generated keys would be: - -1, 6, 11, 16, 21, ... - -The auto-increment values are increased and handed out on each document insert -attempt. Even if an insert fails, the auto-increment value is never rolled back. -That means there may exist gaps in the sequence of assigned auto-increment values -if inserts fails. - -The basic operations (create, read, update, delete) for documents are mapped -to the standard HTTP methods (*POST*, *GET*, *PUT*, *DELETE*). - - -Address of a Collection ------------------------ - -All collections in ArangoDB have a unique identifier and a unique -name. ArangoDB internally uses the collection's unique identifier to -look up collections. This identifier however is managed by ArangoDB -and the user has no control over it. 
In order to allow users use their -own names, each collection also has a unique name, which is specified -by the user. To access a collection from the user perspective, the -collection name should be used, i.e.: - - http://server:port/_api/collection/collection-name - -For example: Assume that the collection identifier is *7254820* and -the collection name is *demo*, then the URL of that collection is: - - http://localhost:8529/_api/collection/demo diff --git a/Documentation/Books/HTTP/Database/DatabaseEndpoint.md b/Documentation/Books/HTTP/Database/DatabaseEndpoint.md deleted file mode 100644 index adcfa255202f..000000000000 --- a/Documentation/Books/HTTP/Database/DatabaseEndpoint.md +++ /dev/null @@ -1,45 +0,0 @@ -Database-to-Endpoint Mapping -============================ - -If a [database name](../../Manual/Appendix/Glossary.html#database-name) is present in the -URI as above, ArangoDB will consult the database-to-endpoint mapping for the current -endpoint, and validate if access to the database is allowed on the endpoint. -If the endpoint is not restricted to an array of databases, ArangoDB will continue with the -regular authentication procedure. If the endpoint is restricted to an array of specified databases, -ArangoDB will check if the requested database is in the array. If not, the request will be turned -down instantly. If yes, then ArangoDB will continue with the regular authentication procedure. - -If the request URI was *http:// localhost:8529/_db/mydb/...*, then the request to *mydb* will be -allowed (or disallowed) in the following situations: - -``` -Endpoint-to-database mapping Access to *mydb* allowed? ----------------------------- ------------------------- -[ ] yes -[ "_system" ] no -[ "_system", "mydb" ] yes -[ "mydb" ] yes -[ "mydb", "_system" ] yes -[ "test1", "test2" ] no -``` - -In case no database name is specified in the request URI, ArangoDB will derive the database -name from the endpoint-to-database mapping of the endpoint -the connection was coming in on. - -If the endpoint is not restricted to an array of databases, ArangoDB will assume the *_system* -database. If the endpoint is restricted to one or multiple databases, ArangoDB will assume -the first name from the array. - -Following is an overview of which database name will be assumed for different endpoint-to-database -mappings in case no database name is specified in the URI: - -``` -Endpoint-to-database mapping Database ----------------------------- -------- -[ ] _system -[ "_system" ] _system -[ "_system", "mydb" ] _system -[ "mydb" ] mydb -[ "mydb", "_system" ] mydb -``` diff --git a/Documentation/Books/HTTP/Database/DatabaseManagement.md b/Documentation/Books/HTTP/Database/DatabaseManagement.md deleted file mode 100644 index 1158f3e49343..000000000000 --- a/Documentation/Books/HTTP/Database/DatabaseManagement.md +++ /dev/null @@ -1,30 +0,0 @@ -Database Management -=================== - -This is an introduction to ArangoDB's HTTP interface for managing databases. - -The HTTP interface for databases provides operations to create and drop -individual databases. These are mapped to the standard HTTP methods *POST* -and *DELETE*. There is also the *GET* method to retrieve an array of existing -databases. - -Please note that all database management operations can only be accessed via -the default database (*_system*) and none of the other databases. 
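For example, creating, listing and dropping a database would be issued against the *_system* database like this (a hedged sketch; the database name *mydb* is illustrative):

```js
> curl -X POST --data '{ "name" : "mydb" }' --dump - http://localhost:8529/_db/_system/_api/database

> curl --dump - http://localhost:8529/_db/_system/_api/database

> curl -X DELETE --dump - http://localhost:8529/_db/_system/_api/database/mydb
```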
- -Managing Databases using HTTP ------------------------------ - - -@startDocuBlock get_api_database_current - - -@startDocuBlock get_api_database_user - - -@startDocuBlock get_api_database_list - - -@startDocuBlock get_api_database_new - - -@startDocuBlock get_api_database_delete diff --git a/Documentation/Books/HTTP/Database/NotesOnDatabases.md b/Documentation/Books/HTTP/Database/NotesOnDatabases.md deleted file mode 100644 index 3383d1d385a5..000000000000 --- a/Documentation/Books/HTTP/Database/NotesOnDatabases.md +++ /dev/null @@ -1,63 +0,0 @@ -Notes on Databases -================== - -Please keep in mind that each database contains its own system collections, -which need to set up when a database is created. This will make the creation -of a database take a while. Replication is configured on a per-database level, -meaning that any replication logging or applying for a new database must -be configured explicitly after a new database has been created. Foxx applications -are also available only in the context of the database they have been installed -in. A new database will only provide access to the system applications shipped -with ArangoDB (that is the web interface at the moment) and no other Foxx -applications until they are explicitly installed for the particular database. - -Database --------- - -ArangoDB can handle multiple databases in the same server instance. Databases can be used to logically group and separate data. An ArangoDB database consists of collections and dedicated database-specific worker processes. -A database contains its own collections (which cannot be accessed from other databases), Foxx applications and replication loggers and appliers. Each ArangoDB database contains its own system collections (e.g. _users, _graphs, ...). - -There will always be at least one database in ArangoDB. This is the default [database name](../../Manual/Appendix/Glossary.html#database-name)d _system. This database cannot be dropped and provides special operations for creating, dropping and enumerating databases. Users can create additional databases and give them unique names to access them later. Database management operations cannot be initiated from out of user-defined databases. - -When ArangoDB is accessed via its HTTP REST API, the database name is read from the first part of the request URI path (e.g. /_db/_system/...). If the request URI does not contain a database name, the database name is automatically determined by the algorithm described in Database-to-Endpoint Mapping . - -Database Name -------------- - -A single ArangoDB instance can handle multiple databases in parallel. When multiple databases are used, each database must be given an unique name. This name is used to uniquely identify a database. The default database in ArangoDB is named _system. -The database name is a string consisting of only letters, digits and the _ (underscore) and - (dash) characters. User-defined database names must always start with a letter. Database names are case-sensitive. - -Database Organization ---------------------- - -A single ArangoDB instance can handle multiple databases in parallel. By default, there will be at least one database which is named _system. -Databases are physically stored in separate sub-directories underneath the database directory, which itself resides in the instance's data directory. - -Each database has its own sub-directory, named database-. The database directory contains sub-directories for the collections of the database, and a file named parameter.json. 
This file contains the database id and name. - -In an example ArangoDB instance which has two databases, the filesystem layout could look like this: - -``` -data/ # the instance's data directory - databases/ # sub-directory containing all databases' data - database-/ # sub-directory for a single database - parameter.json # file containing database id and name - collection-/ # directory containing data about a collection - database-/ # sub-directory for another database - parameter.json # file containing database id and name - collection-/ # directory containing data about a collection - collection-/ # directory containing data about a collection -``` - -Foxx applications are also organized in database-specific directories inside the application path. The filesystem layout could look like this: - -``` -apps/ # the instance's application directory - system/ # system applications (can be ignored) - databases/ # sub-directory containing database-specific applications - / # sub-directory for a single database - # sub-directory for a single application - # sub-directory for a single application - / # sub-directory for another database - # sub-directory for a single application -```` diff --git a/Documentation/Books/HTTP/Database/README.md b/Documentation/Books/HTTP/Database/README.md deleted file mode 100644 index c4a32cab4973..000000000000 --- a/Documentation/Books/HTTP/Database/README.md +++ /dev/null @@ -1,20 +0,0 @@ -HTTP Interface for Databases -============================ - -Address of a Database ---------------------- - -Any operation triggered via ArangoDB's HTTP REST API is executed in the context of exactly -one database. To explicitly specify the database in a request, the request URI must contain -the [database name](../../Manual/Appendix/Glossary.html#database-name) in front of the actual path: - - http://localhost:8529/_db/mydb/... - -where *...* is the actual path to the accessed resource. In the example, the resource will be -accessed in the context of the database *mydb*. Actual URLs in the context of *mydb* could look -like this: - - http://localhost:8529/_db/mydb/_api/version - http://localhost:8529/_db/mydb/_api/document/test/12345 - http://localhost:8529/_db/mydb/myapp/get - diff --git a/Documentation/Books/HTTP/Document/AddressAndEtag.md b/Documentation/Books/HTTP/Document/AddressAndEtag.md deleted file mode 100644 index 87db1ac75171..000000000000 --- a/Documentation/Books/HTTP/Document/AddressAndEtag.md +++ /dev/null @@ -1,162 +0,0 @@ -Basics and Terminology -====================== - -Documents, Keys, Handles and Revisions --------------------------------------- - -Documents in ArangoDB are JSON objects. These objects can be nested (to -any depth) and may contain lists. Each document has a unique -[primary key](../../Manual/Appendix/Glossary.html#document-key) which -identifies it within its collection. Furthermore, each document is -uniquely identified -by its [document handle](../../Manual/Appendix/Glossary.html#document-handle) -across all collections in the same database. Different revisions of -the same document (identified by its handle) can be distinguished by their -[document revision](../../Manual/Appendix/Glossary.html#document-revision). -Any transaction only ever sees a single revision of a document. 
- -Here is an example document: - -```js -{ - "_id" : "myusers/3456789", - "_key" : "3456789", - "_rev" : "14253647", - "firstName" : "John", - "lastName" : "Doe", - "address" : { - "street" : "Road To Nowhere 1", - "city" : "Gotham" - }, - "hobbies" : [ - {name: "swimming", howFavorite: 10}, - {name: "biking", howFavorite: 6}, - {name: "programming", howFavorite: 4} - ] -} -``` - -All documents contain special attributes: the -[document handle](../../Manual/Appendix/Glossary.html#document-handle) is stored -as a string in `_id`, the -[document's primary key](../../Manual/Appendix/Glossary.html#document-key) in -`_key` and the -[document revision](../../Manual/Appendix/Glossary.html#document-revision) in -`_rev`. The value of the `_key` attribute can be specified by the user when -creating a document. `_id` and `_key` values are immutable once the document -has been created. The `_rev` value is maintained by ArangoDB automatically. - - -Document Handle ---------------- - -A document handle uniquely identifies a document in the database. It -is a string and consists of the collection's name and the document key -(`_key` attribute) separated by `/`. - - -Document Key ------------- - -A document key uniquely identifies a document in the collection it is -stored in. It can and should be used by clients when specific documents -are queried. The document key is stored in the `_key` attribute of -each document. The key values are automatically indexed by ArangoDB in -a collection's primary index. Thus looking up a document by its -key is a fast operation. The _key value of a document is -immutable once the document has been created. By default, ArangoDB will -auto-generate a document key if no _key attribute is specified, and use -the user-specified _key otherwise. - -This behavior can be changed on a per-collection level by creating -collections with the `keyOptions` attribute. - -Using `keyOptions` it is possible to disallow user-specified keys -completely, or to force a specific regime for auto-generating the `_key` -values. - - -Document Revision ------------------ - -@startDocuBlock documentRevision - - -Document Etag -------------- - -ArangoDB tries to adhere to the existing HTTP standard as far as -possible. To this end, results of single document queries have the HTTP -header `Etag` set to the document revision enclosed in double quotes. - -The basic operations (create, read, exists, replace, update, delete) -for documents are mapped to the standard HTTP methods (*POST*, *GET*, -*HEAD*, *PUT*, *PATCH* and *DELETE*). - -If you modify a document, you can use the *If-Match* field to detect conflicts. -The revision of a document can be checking using the HTTP method *HEAD*. - - -Multiple Documents in a single Request --------------------------------------- - -Beginning with ArangoDB 3.0 the basic document API has been extended -to handle not only single documents but multiple documents in a single -request. This is crucial for performance, in particular in the cluster -situation, in which a single request can involve multiple network hops -within the cluster. Another advantage is that it reduces the overhead of -the HTTP protocol and individual network round trips between the client -and the server. The general idea to perform multiple document operations -in a single request is to use a JSON array of objects in the place of a -single document. As a consequence, document keys, handles and revisions -for preconditions have to be supplied embedded in the individual documents -given. 
Multiple document operations are restricted to a single document -or edge collections. -See the [API descriptions](WorkingWithDocuments.md) for details. - -Note that the *GET*, *HEAD* and *DELETE* HTTP operations generally do -not allow to pass a message body. Thus, they cannot be used to perform -multiple document operations in one request. However, there are other -endpoints to request and delete multiple documents in one request. -FIXME: ADD SENSIBLE LINKS HERE. - - -URI of a Document ------------------ - -Any document can be retrieved using its unique URI: - - http://server:port/_api/document/ - -For example, assuming that the document handle -is `demo/362549736`, then the URL of that document -is: - - http://localhost:8529/_api/document/demo/362549736 - -The above URL schema does not specify a -[database name](../../Manual/Appendix/Glossary.html#database-name) -explicitly, so the -default database `_system` will be used. -To explicitly specify the database context, use -the following URL schema: - - http://server:port/_db//_api/document/ - -Example: - - http://localhost:8529/_db/mydb/_api/document/demo/362549736 - -**Note**: The following examples use the short URL format for brevity. - -The [document revision](../../Manual/Appendix/Glossary.html#document-revision) -is returned in the "Etag" HTTP header when requesting a document. - -If you obtain a document using *GET* and you want to check whether a -newer revision -is available, then you can use the *If-None-Match* header. If the document is -unchanged, a *HTTP 412* (precondition failed) error is returned. - -If you want to query, replace, update or delete a document, then you -can use the *If-Match* header. If the document has changed, then the -operation is aborted and an *HTTP 412* error is returned. diff --git a/Documentation/Books/HTTP/Document/README.md b/Documentation/Books/HTTP/Document/README.md deleted file mode 100644 index e0fb8d4a76a4..000000000000 --- a/Documentation/Books/HTTP/Document/README.md +++ /dev/null @@ -1,7 +0,0 @@ -HTTP Interface for Documents -============================ - -In this chapter we describe the REST API of ArangoDB for documents. - - - [Basic approach](AddressAndEtag.md) - - [Detailed API description](WorkingWithDocuments.md) diff --git a/Documentation/Books/HTTP/Document/WorkingWithDocuments.md b/Documentation/Books/HTTP/Document/WorkingWithDocuments.md deleted file mode 100644 index d9fc165e145c..000000000000 --- a/Documentation/Books/HTTP/Document/WorkingWithDocuments.md +++ /dev/null @@ -1,125 +0,0 @@ -Working with Documents using REST -================================= - - -@startDocuBlock get_read_document - -**Changes in 3.0 from 2.8:** - -The *rev* query parameter has been withdrawn. The same effect can be -achieved with the *If-Match* HTTP header. - - -@startDocuBlock head_read_document_header - -**Changes in 3.0 from 2.8:** - -The *rev* query parameter has been withdrawn. The same effect can be -achieved with the *If-Match* HTTP header. - - -@startDocuBlock put_read_all_documents - -**Changes in 3.0 from 2.8:** - -The collection name should now be specified in the URL path. The old -way with the URL path */_api/document* and the required query parameter -*collection* still works. - - -@startDocuBlock post_create_document - -**Changes in 3.0 from 2.8:** - -The collection name should now be specified in the URL path. The old -way with the URL path */_api/document* and the required query parameter -*collection* still works. 
The possibility to insert multiple documents -with one operation is new and the query parameter *returnNew* has been added. - - - -@startDocuBlock put_replace_document - -**Changes in 3.0 from 2.8:** - -There are quite some changes in this in comparison to Version 2.8, but -few break existing usage: - - - the *rev* query parameter is gone (was duplication of If-Match) - - the *policy* query parameter is gone (was non-sensical) - - the *ignoreRevs* query parameter is new, the default *true* gives - the traditional behavior as in 2.8 - - the *returnNew* and *returnOld* query parameters are new - -There should be very few changes to behavior happening in real-world -situations or drivers. Essentially, one has to replace usage of the -*rev* query parameter by usage of the *If-Match* header. The non-sensical -combination of *If-Match* given and *policy=last* no longer works, but can -easily be achieved by leaving out the *If-Match* header. - -The collection name should now be specified in the URL path. The old -way with the URL path */_api/document* and the required query parameter -*collection* still works. - - -@startDocuBlock put_replace_document_MULTI - -**Changes in 3.0 from 2.8:** - -The multi document version is new in 3.0. - - -@startDocuBlock patch_update_document - -**Changes in 3.0 from 2.8:** - -There are quite some changes in this in comparison to Version 2.8, but -few break existing usage: - - - the *rev* query parameter is gone (was duplication of If-Match) - - the *policy* query parameter is gone (was non-sensical) - - the *ignoreRevs* query parameter is new, the default *true* gives - the traditional behavior as in 2.8 - - the *returnNew* and *returnOld* query parameters are new - -There should be very few changes to behavior happening in real-world -situations or drivers. Essentially, one has to replace usage of the -*rev* query parameter by usage of the *If-Match* header. The non-sensical -combination of *If-Match* given and *policy=last* no longer works, but can -easily be achieved by leaving out the *If-Match* header. - -The collection name should now be specified in the URL path. The old -way with the URL path */_api/document* and the required query parameter -*collection* still works. - - -@startDocuBlock patch_update_document_MULTI - -**Changes in 3.0 from 2.8:** - -The multi document version is new in 3.0. - - -@startDocuBlock delete_remove_document - -**Changes in 3.0 from 2.8:** - -There are only very few changes in this in comparison to Version 2.8: - - - the *rev* query parameter is gone (was duplication of If-Match) - - the *policy* query parameter is gone (was non-sensical) - - the *returnOld* query parameter is new - -There should be very few changes to behavior happening in real-world -situations or drivers. Essentially, one has to replace usage of the -*rev* query parameter by usage of the *If-Match* header. The non-sensical -combination of *If-Match* given and *policy=last* no longer works, but can -easily be achieved by leaving out the *If-Match* header. - - -@startDocuBlock delete_remove_document_MULTI - -**Changes in 3.0 from 2.8:** - -This variant is new in 3.0. Note that it requires a body in the DELETE -request. 
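As a hedged illustration of this multi-document removal variant, the request body is an array of keys (or of documents containing *_key* and optionally *_rev*); the collection *xyz* and the keys shown are made up:

```js
> curl -X DELETE --data '[ "9514299", "9579835" ]' --dump - http://localhost:8529/_api/document/xyz
```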
diff --git a/Documentation/Books/HTTP/Edge/AddressAndEtag.md b/Documentation/Books/HTTP/Edge/AddressAndEtag.md deleted file mode 100644 index 912ba464fd94..000000000000 --- a/Documentation/Books/HTTP/Edge/AddressAndEtag.md +++ /dev/null @@ -1,30 +0,0 @@ -Address and Etag of an Edge -=========================== - -All documents in ArangoDB have a [document handle](../../Manual/Appendix/Glossary.html#document-handle). This handle uniquely identifies -a document. Any document can be retrieved using its unique URI: - - http://server:port/_api/document/ - -Edges are a special variation of documents. To access an edge use the same -URL format as for a document: - - http://server:port/_api/document/ - -For example, assumed that the document handle, which is stored in the *_id* -attribute of the edge, is *demo/362549736*, then the URL of that edge is: - - http://localhost:8529/_api/document/demo/362549736 - -The above URL scheme does not specify a [database name](../../Manual/Appendix/Glossary.html#database-name) explicitly, so the -default database will be used. To explicitly specify the database context, use -the following URL schema: - - http://server:port/_db//_api/document/ - -*Example*: - - http://localhost:8529/_db/mydb/_api/document/demo/362549736 - -**Note**: that the following examples use the short URL format for brevity. - diff --git a/Documentation/Books/HTTP/Edge/README.md b/Documentation/Books/HTTP/Edge/README.md deleted file mode 100644 index d86ad8b22858..000000000000 --- a/Documentation/Books/HTTP/Edge/README.md +++ /dev/null @@ -1,7 +0,0 @@ -HTTP Interface for Edges -======================== - -This is an introduction to ArangoDB's [REST interface for edges](../../Manual/Graphs/Edges/index.html). - -ArangoDB offers [graph functionality](../../Manual/Graphs/index.html); Edges are one part of that. - diff --git a/Documentation/Books/HTTP/Edge/WorkingWithEdges.md b/Documentation/Books/HTTP/Edge/WorkingWithEdges.md deleted file mode 100644 index 06190577262d..000000000000 --- a/Documentation/Books/HTTP/Edge/WorkingWithEdges.md +++ /dev/null @@ -1,16 +0,0 @@ -Working with Edges using REST -============================= - -This is documentation to ArangoDB's -[REST interface for edges](../../Manual/Graphs/Edges/index.html). - -Edges are documents with two additional attributes: *_from* and *_to*. -These attributes are mandatory and must contain the document-handle -of the from and to vertices of an edge. - -Use the general document -[REST api](../Document/WorkingWithDocuments.md) -for create/read/update/delete. - - -@startDocuBlock get_read_in_out_edges diff --git a/Documentation/Books/HTTP/Endpoints/README.md b/Documentation/Books/HTTP/Endpoints/README.md deleted file mode 100644 index c87fb8b95f76..000000000000 --- a/Documentation/Books/HTTP/Endpoints/README.md +++ /dev/null @@ -1,25 +0,0 @@ -HTTP Interface for Endpoints -============================ - -The API `/_api/endpoint` is *deprecated*. For cluster mode there -is `/_api/cluster/endpoints` to find all current coordinator endpoints -(see below). - -The ArangoDB server can listen for incoming requests on multiple *endpoints*. - -The endpoints are normally specified either in ArangoDB's configuration -file or on the command-line, using the "--server.endpoint" option. -The default endpoint for ArangoDB is *tcp://127.0.0.1:8529* or -*tcp://localhost:8529*. - -Please note that all endpoint management operations can only be accessed via -the default database (*_system*) and none of the other databases. 
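For example, both the deprecated endpoint listing and its cluster-mode replacement can be queried against the *_system* database (a hedged sketch):

```js
> curl --dump - http://localhost:8529/_db/_system/_api/endpoint

> curl --dump - http://localhost:8529/_db/_system/_api/cluster/endpoints
```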
- -Asking about Endpoints via HTTP ---------------------------- - - -@startDocuBlock get_api_cluster_endpoints - - -@startDocuBlock get_api_endpoint diff --git a/Documentation/Books/HTTP/Export/README.md b/Documentation/Books/HTTP/Export/README.md deleted file mode 100644 index 9d63b0c4de1b..000000000000 --- a/Documentation/Books/HTTP/Export/README.md +++ /dev/null @@ -1,4 +0,0 @@ -HTTP Interface for Exporting Documents -====================================== - -@startDocuBlock post_api_export diff --git a/Documentation/Books/HTTP/FOOTER.html b/Documentation/Books/HTTP/FOOTER.html deleted file mode 100644 index 239869bfaf6a..000000000000 --- a/Documentation/Books/HTTP/FOOTER.html +++ /dev/null @@ -1 +0,0 @@ -© ArangoDB - the native multi-model NoSQL database \ No newline at end of file diff --git a/Documentation/Books/HTTP/Foxx/Configuration.md b/Documentation/Books/HTTP/Foxx/Configuration.md deleted file mode 100644 index 71667fd21c0a..000000000000 --- a/Documentation/Books/HTTP/Foxx/Configuration.md +++ /dev/null @@ -1,16 +0,0 @@ -Foxx Service configuration / dependencies -========================================= - -This is an introduction to ArangoDB's HTTP interface for managing Foxx services configuration and dependencies. - -@startDocuBlock api_foxx_configuration_get - -@startDocuBlock api_foxx_configuration_update - -@startDocuBlock api_foxx_configuration_replace - -@startDocuBlock api_foxx_dependencies_get - -@startDocuBlock api_foxx_dependencies_update - -@startDocuBlock api_foxx_dependencies_replace diff --git a/Documentation/Books/HTTP/Foxx/Management.md b/Documentation/Books/HTTP/Foxx/Management.md deleted file mode 100644 index be1f17249001..000000000000 --- a/Documentation/Books/HTTP/Foxx/Management.md +++ /dev/null @@ -1,16 +0,0 @@ -Foxx Service Management -======================= - -This is an introduction to ArangoDB's HTTP interface for managing Foxx services. - -@startDocuBlock api_foxx_service_list - -@startDocuBlock api_foxx_service_details - -@startDocuBlock api_foxx_service_install - -@startDocuBlock api_foxx_service_uninstall - -@startDocuBlock api_foxx_service_replace - -@startDocuBlock api_foxx_service_upgrade diff --git a/Documentation/Books/HTTP/Foxx/Miscellaneous.md b/Documentation/Books/HTTP/Foxx/Miscellaneous.md deleted file mode 100644 index b6ed9603868d..000000000000 --- a/Documentation/Books/HTTP/Foxx/Miscellaneous.md +++ /dev/null @@ -1,20 +0,0 @@ -Foxx Service Miscellaneous -========================== - -@startDocuBlock api_foxx_scripts_list - -@startDocuBlock api_foxx_scripts_run - -@startDocuBlock api_foxx_tests_run - -@startDocuBlock api_foxx_development_enable - -@startDocuBlock api_foxx_development_disable - -@startDocuBlock api_foxx_readme - -@startDocuBlock api_foxx_swagger - -@startDocuBlock api_foxx_bundle - -@startDocuBlock api_foxx_commit diff --git a/Documentation/Books/HTTP/Foxx/README.md b/Documentation/Books/HTTP/Foxx/README.md deleted file mode 100644 index b5a8c0c43f9a..000000000000 --- a/Documentation/Books/HTTP/Foxx/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Foxx HTTP API -============= - -These routes allow manipulating the Foxx services installed in a database. - -For more information on Foxx and its JavaScript APIs see the [Foxx chapter of the main documentation](../../Manual/Foxx/index.html). 
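A hedged sketch of the two most common calls, listing all installed services and fetching the details of one of them (the mount path */my-service* is illustrative):

```js
> curl --dump - http://localhost:8529/_db/_system/_api/foxx

> curl --dump - "http://localhost:8529/_db/_system/_api/foxx/service?mount=/my-service"
```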
diff --git a/Documentation/Books/HTTP/General/README.md b/Documentation/Books/HTTP/General/README.md deleted file mode 100644 index c327d1242551..000000000000 --- a/Documentation/Books/HTTP/General/README.md +++ /dev/null @@ -1,446 +0,0 @@ -General HTTP Request Handling in ArangoDB -========================================= - -Protocol --------- - -ArangoDB exposes its API via HTTP, making the server accessible easily with -a variety of clients and tools (e.g. browsers, curl, telnet). The communication -can optionally be SSL-encrypted. - -ArangoDB uses the standard HTTP methods (e.g. *GET*, *POST*, *PUT*, *DELETE*) plus -the *PATCH* method described in [RFC 5789](http://tools.ietf.org/html/rfc5789). - -Most server APIs expect clients to send any payload data in [JSON](http://www.json.org) -format. Details on the expected format and JSON attributes can be found in the -documentation of the individual server methods. - -Clients sending requests to ArangoDB must use either HTTP 1.0 or HTTP 1.1. -Other HTTP versions are not supported by ArangoDB and any attempt to send -a different HTTP version signature will result in the server responding with -an HTTP 505 (HTTP version not supported) error. - -ArangoDB will always respond to client requests with HTTP 1.1. Clients -should therefore support HTTP version 1.1. - -Clients are required to include the *Content-Length* HTTP header with the -correct content length in every request that can have a body (e.g. *POST*, -*PUT* or *PATCH*) request. ArangoDB will not process requests without a -*Content-Length* header - thus chunked transfer encoding for POST-documents -is not supported. - -HTTP Keep-Alive ---------------- - -ArangoDB supports HTTP keep-alive. If the client does not send a *Connection* -header in its request, and the client uses HTTP version 1.1, ArangoDB will assume -the client wants to keep alive the connection. -If clients do not wish to use the keep-alive feature, they should -explicitly indicate that by sending a *Connection: Close* HTTP header in -the request. - -ArangoDB will close connections automatically for clients that send requests -using HTTP 1.0, except if they send an *Connection: Keep-Alive* header. - -The default Keep-Alive timeout can be specified at server start using the -*--http.keep-alive-timeout* parameter. - -Establishing TCP connections is expensive, since it takes several ping pongs -between the communication parties. Therefore you can use connection keepalive -to send several HTTP request over one TCP-connection; -Each request is treated independently by definition. You can use this feature -to build up a so called *connection pool* with several established -connections in your client application, and dynamically re-use -one of those then idle connections for subsequent requests. - -Blocking vs. Non-blocking HTTP Requests ---------------------------------------- - -ArangoDB supports both blocking and non-blocking HTTP requests. - -ArangoDB is a multi-threaded server, allowing the processing of multiple -client requests at the same time. Request/response handling and the actual -work are performed on the server in parallel by multiple worker threads. - -Still, clients need to wait for their requests to be processed by the server, -and thus keep one connection of a pool occupied. -By default, the server will fully process an incoming request and then return -the result to the client when the operation is finished. 
The client must -wait for the server's HTTP response before it can send additional requests over -the same connection. For clients that are single-threaded and/or are -blocking on I/O themselves, waiting idle for the server response may be -non-optimal. - -To reduce blocking on the client side, ArangoDB offers a generic mechanism for -non-blocking, asynchronous execution: clients can add the -HTTP header *x-arango-async: true* to any of their requests, marking -them as to be executed asynchronously on the server. ArangoDB will put such -requests into an in-memory task queue and return an *HTTP 202* (accepted) -response to the client instantly and thus finish this HTTP-request. -The server will execute the tasks from the queue asynchronously as fast -as possible, while clients can continue to do other work. -If the server queue is full (i.e. contains as many tasks as specified by the -option ["--server.maximal-queue-size"](../../Manual/Programs/Arangod/Options.html#server-options)), -then the request will be rejected instantly with an *HTTP 500* (internal -server error) response. - -Asynchronous execution decouples the request/response handling from the actual -work to be performed, allowing fast server responses and greatly reducing wait -time for clients. Overall this allows for much higher throughput than if -clients would always wait for the server's response. - -Keep in mind that the asynchronous execution is just "fire and forget". -Clients will get any of their asynchronous requests answered with a generic -HTTP 202 response. At the time the server sends this response, it does not -know whether the requested operation can be carried out successfully (the -actual operation execution will happen at some later point). Clients therefore -cannot make a decision based on the server response and must rely on their -requests being valid and processable by the server. - -Additionally, the server's asynchronous task queue is an in-memory data -structure, meaning not-yet processed tasks from the queue might be lost in -case of a crash. Clients should therefore not use the asynchronous feature -when they have strict durability requirements or if they rely on the immediate -result of the request they send. - -For details on the subsequent processing -[read on under Async Result handling](../AsyncResultsManagement/README.md). - -Authentication --------------- - -Client authentication can be achieved by using the *Authorization* HTTP header in -client requests. ArangoDB supports authentication via HTTP Basic or JWT. - -Authentication is turned on by default for all internal database APIs but turned off for custom Foxx apps. -To toggle authentication for incoming requests to the internal database APIs, use the option -[--server.authentication](../../Manual/Programs/Arangod/Server.html#enabledisable-authentication). -This option is turned on by default so authentication is required for the database APIs. - -Please note that requests using the HTTP OPTIONS method will be answered by -ArangoDB in any case, even if no authentication data is sent by the client or if -the authentication data is wrong. This is required for handling CORS preflight -requests (see [Cross Origin Resource Sharing requests](#cross-origin-resource-sharing-cors-requests)). -The response to an HTTP OPTIONS request will be generic and not expose any private data. - -There is an additional option to control authentication for custom Foxx apps. 
The option -[--server.authentication-system-only](../../Manual/Programs/Arangod/Server.html#enabledisable-authentication-for-system-api-requests-only) -controls whether authentication is required only for requests to the internal database APIs and the admin interface. -It is turned on by default, meaning that other APIs (this includes custom Foxx apps) do not require authentication. - -The default values allow exposing a public custom Foxx API built with ArangoDB to the outside -world without the need for HTTP authentication, but still protecting the usage of the -internal database APIs (i.e. */_api/*, */_admin/*) with HTTP authentication. - -If the server is started with the *--server.authentication-system-only* option set -to *false*, all incoming requests will need HTTP authentication if the server is configured -to require HTTP authentication (i.e. *--server.authentication true*). -Setting the option to *true* will make the server require authentication only for requests to the -internal database APIs and will allow unauthenticated requests to all other URLs. - -Here's a short summary: - -* `--server.authentication true --server.authentication-system-only true`: this will require - authentication for all requests to the internal database APIs but not custom Foxx apps. - This is the default setting. -* `--server.authentication true --server.authentication-system-only false`: this will require - authentication for all requests (including custom Foxx apps). -* `--server.authentication false`: authentication disabled for all requests - -Whenever authentication is required and the client has not yet authenticated, -ArangoDB will return *HTTP 401* (Unauthorized). It will also send the *WWW-Authenticate* -response header, indicating that the client should prompt the user for username and -password if supported. If the client is a browser, then sending back this header will -normally trigger the display of the browser-side HTTP authentication dialog. -As showing the browser HTTP authentication dialog is undesired in AJAX requests, -ArangoDB can be told to not send the *WWW-Authenticate* header back to the client. -Whenever a client sends the *X-Omit-WWW-Authenticate* HTTP header (with an arbitrary value) -to ArangoDB, ArangoDB will only send status code 401, but no *WWW-Authenticate* header. -This allows clients to implement credentials handling and bypassing the browser's -built-in dialog. - -### Authentication via JWT - -ArangoDB uses a standard JWT based authentication method. -To authenticate via JWT you must first obtain a JWT token with a signature generated via HMAC with SHA-256. -The secret may either be set using `--server.jwt-secret` or will be randomly generated upon server startup. - -For more information on JWT please consult RFC7519 and https://jwt.io - -#### User JWT-Token - -To authenticate with a specific user you need to supply a JWT token containing -the _preferred_username_ field with the username. -You can either let ArangoDB generate this token for you via an API call -or you can generate it yourself (only if you know the JWT secret). - -ArangoDB offers a REST API to generate user tokens for you if you know the username and password. 
-To do so send a POST request to - -*/_open/auth* -containing *username* and *password* JSON-encoded like so: - -{"username":"root","password":"rootPassword"} - -Upon success the endpoint will return a **200 OK** and an answer containing the JWT in a JSON- -encoded object like so: - -``` -{"jwt":"eyJhbGciOiJIUzI1NiI..x6EfI"} -``` - -This JWT should then be used within the Authorization HTTP header in subsequent requests: - -``` -Authorization: bearer eyJhbGciOiJIUzI1NiI..x6EfI -``` - -Please note that the JWT will expire after 1 month and needs to be updated. We encode the expiration -date of the JWT token in the _exp_ field in unix time. -Please note that all JWT tokens must contain the _iss_ field with string value `arangodb`. -As an example the decoded JWT body would look like this: - -```json -{ - "exp": 1540381557, - "iat": 1537789.55727901, - "iss": "arangodb", - "preferred_username": "root" -} -``` - -#### Superuser JWT-Token - -To access specific internal APIs as well as Agency and DBServer instances a token generated via `/open/auth` is not -good enough. For these special APIs you will need to generate a special JWT token which grants superuser access. -Note that using superuser access for normal database operations is **NOT advised**. - -_Note_: It is only possible to generate this JWT token with the knowledge of the JWT secret. - -For your convenience it is possible to generate this token via the [ArangoDB starter CLI](../../Manual/Programs/Starter/Security.html#using-authentication-tokens). - -Should you wish to generate the JWT token yourself with a tool of your choice, you need to include the correct body. -The body must contain the _iss_ field with string value `arangodb` and the `server_id` field with an arbitrary string identifier: - -```json -{ - "exp": 1537900279, - "iat": 1537800279, - "iss": "arangodb", - "server_id": "myclient" -} -``` - -For example to generate a token via the [jwtgen tool](https://www.npmjs.com/package/jwtgen) (note the lifetime of one hour): - -``` -jwtgen -s -e 3600 -v -a "HS256" -c 'iss=arangodb' -c 'server_id=myclient' -curl -v -H "Authorization: bearer $(jwtgen -s -e 3600 -a "HS256" -c 'iss=arangodb' -c 'server_id=myclient')" http://:8529/_api/version -``` - -Error Handling --------------- - -The following should be noted about how ArangoDB handles client errors in its -HTTP layer: - -* client requests using an HTTP version signature different than *HTTP/1.0* or - *HTTP/1.1* will get an *HTTP 505* (HTTP version not supported) error in return. -* ArangoDB will reject client requests with a negative value in the - *Content-Length* request header with *HTTP 411* (Length Required). -* ArangoDB doesn't support POST with *transfer-encoding: chunked* which forbids - the *Content-Length* header above. -* the maximum URL length accepted by ArangoDB is 16K. Incoming requests with - longer URLs will be rejected with an *HTTP 414* (Request-URI too long) error. -* if the client sends a *Content-Length* header with a value bigger than 0 for - an HTTP GET, HEAD, or DELETE request, ArangoDB will process the request, but - will write a warning to its log file. -* when the client sends a *Content-Length* header that has a value that is lower - than the actual size of the body sent, ArangoDB will respond with *HTTP 400* - (Bad Request). -* if clients send a *Content-Length* value bigger than the actual size of the - body of the request, ArangoDB will wait for about 90 seconds for the client to - complete its request. 
If the client does not send the remaining body data - within this time, ArangoDB will close the connection. Clients should avoid - sending such malformed requests as this will block one tcp connection, - and may lead to a temporary file descriptor leak. -* when clients send a body or a *Content-Length* value bigger than the maximum - allowed value (512 MB), ArangoDB will respond with *HTTP 413* (Request Entity - Too Large). -* if the overall length of the HTTP headers a client sends for one request - exceeds the maximum allowed size (1 MB), the server will fail with *HTTP 431* - (Request Header Fields Too Large). -* if clients request an HTTP method that is not supported by the server, ArangoDB - will return with *HTTP 405* (Method Not Allowed). ArangoDB offers general - support for the following HTTP methods: - * GET - * POST - * PUT - * DELETE - * HEAD - * PATCH - * OPTIONS - - Please note that not all server actions allow using all of these HTTP methods. - You should look up the supported methods for each method you intend to use - in the manual. - - Requests using any other HTTP method (such as for example CONNECT, TRACE etc.) - will be rejected by ArangoDB as mentioned before. - -Cross-Origin Resource Sharing (CORS) requests ---------------------------------------------- - -ArangoDB will automatically handle CORS requests as follows: - -### Preflight - -When a browser is told to make a cross-origin request that includes explicit -headers, credentials or uses HTTP methods other than `GET` or `POST`, it will -first perform a so-called preflight request using the `OPTIONS` method. - -ArangoDB will respond to `OPTIONS` requests with an HTTP 200 status response -with an empty body. Since preflight requests are not expected to include or -even indicate the presence of authentication credentials even when they will -be present in the actual request, ArangoDB does not enforce authentication for -`OPTIONS` requests even when authentication is enabled. - -ArangoDB will set the following headers in the response: - -* `access-control-allow-credentials`: will be set to `false` by default. - For details on when it will be set to `true` see the next section on cookies. - -* `access-control-allow-headers`: will be set to the exact value of the - request's `access-control-request-headers` header or omitted if no such - header was sent in the request. - -* `access-control-allow-methods`: will be set to a list of all supported HTTP - headers regardless of the target endpoint. In other words that a method is - listed in this header does not guarantee that it will be supported by the - endpoint in the actual request. - -* `access-control-allow-origin`: will be set to the exact value of the - request's `origin` header. - -* `access-control-expose-headers`: will be set to a list of response headers used - by the ArangoDB HTTP API. - -* `access-control-max-age`: will be set to an implementation-specific value. - -### Actual request - -If a request using any other HTTP method than `OPTIONS` includes an `origin` header, -ArangoDB will add the following headers to the response: - -* `access-control-allow-credentials`: will be set to `false` by default. - For details on when it will be set to `true` see the next section on cookies. - -* `access-control-allow-origin`: will be set to the exact value of the - request's `origin` header. - -* `access-control-expose-headers`: will be set to a list of response headers used - by the ArangoDB HTTP API. 
- -When making CORS requests to endpoints of Foxx services, the value of the -`access-control-expose-headers` header will instead be set to a list of -response headers used in the response itself (but not including the -`access-control-` headers). Note that [Foxx services may override this behavior](../../Manual/Foxx/Guides/Browser.html#cross-origin-resource-sharing-cors). - -### Cookies and authentication - -In order for the client to be allowed to correctly provide authentication -credentials or handle cookies, ArangoDB needs to set the -`access-control-allow-credentials` response header to `true` instead of `false`. - -ArangoDB will automatically set this header to `true` if the value of the -request's `origin` header matches a trusted origin in the `http.trusted-origin` -configuration option. To make ArangoDB trust a certain origin, you can provide -a startup option when running `arangod` like this: - -`--http.trusted-origin "http://localhost:8529"` - -To specify multiple trusted origins, the option can be specified multiple times. -Alternatively you can use the special value `"*"` to trust any origin: - -`--http.trusted-origin "*"` - -Note that browsers will not actually include credentials or cookies in cross-origin -requests unless explicitly told to do so: - -* When using the Fetch API you need to set the - [`credentials` option to `include`](https://fetch.spec.whatwg.org/#cors-protocol-and-credentials). - - ```js - fetch("./", { credentials:"include" }).then(/* … */) - ``` - -* When using `XMLHttpRequest` you need to set the - [`withCredentials` option to `true`](https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/withCredentials). - - ```js - var xhr = new XMLHttpRequest(); - xhr.open('GET', 'https://example.com/', true); - xhr.withCredentials = true; - xhr.send(null); - ``` - -* When using jQuery you need to set the `xhrFields` option: - - ```js - $.ajax({ - url: 'https://example.com', - xhrFields: { - withCredentials: true - } - }); - ``` - -HTTP method overriding ----------------------- - -ArangoDB provides a startup option *--http.allow-method-override*. -This option can be set to allow overriding the HTTP request method (e.g. GET, POST, -PUT, DELETE, PATCH) of a request using one of the following custom HTTP headers: - -* *x-http-method-override* -* *x-http-method* -* *x-method-override* - -This allows using HTTP clients that do not support all "common" HTTP methods such as -PUT, PATCH and DELETE. It also allows bypassing proxies and tools that would otherwise -just let certain types of requests (e.g. GET and POST) pass through. - -Enabling this option may impose a security risk, so it should only be used in very -controlled environments. Thus the default value for this option is *false* (no method -overriding allowed). You need to enable it explicitly if you want to use this -feature. - -Load-balancer support ---------------------- - -When running in cluster mode, ArangoDB exposes some APIs which store request -state data on specific coordinator nodes, and thus subsequent requests which -require access to this state must be served by the coordinator node which owns -this state data. In order to support function behind a load-balancer, ArangoDB -can transparently forward requests within the cluster to the correct node. 
If a -request is forwarded, the response will contain the following custom HTTP header -whose value will be the ID of the node which actually answered the request: - -* *x-arango-request-forwarded-to* - -The following APIs may use request forwarding: - -* `/_api/cursor` -* `/_api/job` -* `/_api/tasks` - -Note: since forwarding such requests require an additional cluster-internal HTTP -request, they should be avoided when possible for best performance. Typically -this is accomplished either by directing the requests to the correct coordinator -at a client-level or by enabling request "stickiness" on a load balancer. Since -these approaches are not always possible in a given environment, we support the -request forwarding as a fall-back solution. - -Note: some endpoints which return "global" data, such as `GET /_api/tasks` will -only return data corresponding to the server on which the request is executed. -These endpoints will generally not work well with load-balancers. diff --git a/Documentation/Books/HTTP/Gharial/Edges.md b/Documentation/Books/HTTP/Gharial/Edges.md deleted file mode 100644 index d5226774f3e1..000000000000 --- a/Documentation/Books/HTTP/Gharial/Edges.md +++ /dev/null @@ -1,22 +0,0 @@ -Handling Edges -============== - -Examples will explain the REST API for [manipulating edges](../../Manual/Graphs/GeneralGraphs/Functions.html) -of the [graph module](../../Manual/Graphs/index.html) -on the [knows graph](../../Manual/Graphs/index.html#the-knowsgraph): - -![Social Example Graph](../../Manual/Graphs/knows_graph.png) - -@startDocuBlock general_graph_edge_create_http_examples - -@startDocuBlock general_graph_edge_get_http_examples - -Examples will explain the API on the [social graph](../../Manual/Graphs/index.html#the-social-graph): - -![Social Example Graph](../../Manual/Graphs/social_graph.png) - -@startDocuBlock general_graph_edge_modify_http_examples - -@startDocuBlock general_graph_edge_replace_http_examples - -@startDocuBlock general_graph_edge_delete_http_examples diff --git a/Documentation/Books/HTTP/Gharial/Management.md b/Documentation/Books/HTTP/Gharial/Management.md deleted file mode 100644 index 6b3dd95cc966..000000000000 --- a/Documentation/Books/HTTP/Gharial/Management.md +++ /dev/null @@ -1,19 +0,0 @@ -Manage your graphs -================== - -The [graph module](../../Manual/Graphs/index.html) provides functions dealing with graph structures. 
-Examples will explain the REST API on the [social graph](../../Manual/Graphs/index.html#the-social-graph): - -![Social Example Graph](../../Manual/Graphs/social_graph.png) - -@startDocuBlock general_graph_list_http_examples -@startDocuBlock general_graph_create_http_examples -@startDocuBlock general_graph_get_http_examples -@startDocuBlock general_graph_drop_http_examples -@startDocuBlock general_graph_list_vertex_http_examples -@startDocuBlock general_graph_vertex_collection_add_http_examples -@startDocuBlock general_graph_vertex_collection_remove_http_examples -@startDocuBlock general_graph_list_edge_http_examples -@startDocuBlock general_graph_edge_definition_add_http_examples -@startDocuBlock general_graph_edge_definition_modify_http_examples -@startDocuBlock general_graph_edge_definition_remove_http_examples diff --git a/Documentation/Books/HTTP/Gharial/README.md b/Documentation/Books/HTTP/Gharial/README.md deleted file mode 100644 index 17868cc61fdb..000000000000 --- a/Documentation/Books/HTTP/Gharial/README.md +++ /dev/null @@ -1,6 +0,0 @@ -General Graphs -============== - -This chapter describes the REST interface for the [multi-collection graph module](../../Manual/Graphs/index.html). -It allows you to define a graph that is spread across several edge and document collections. -There is no need to include the referenced collections within the query, this module will handle it for you. diff --git a/Documentation/Books/HTTP/Gharial/Vertices.md b/Documentation/Books/HTTP/Gharial/Vertices.md deleted file mode 100644 index 09ab9dd152ef..000000000000 --- a/Documentation/Books/HTTP/Gharial/Vertices.md +++ /dev/null @@ -1,13 +0,0 @@ -Handling Vertices -================= - -Examples will explain the REST API to the [graph module](../../Manual/Graphs/index.html) -on the [social graph](../../Manual/Graphs/index.html#the-social-graph): - -![Social Example Graph](../../Manual/Graphs/social_graph.png) - -@startDocuBlock general_graph_vertex_create_http_examples -@startDocuBlock general_graph_vertex_get_http_examples -@startDocuBlock general_graph_vertex_modify_http_examples -@startDocuBlock general_graph_vertex_replace_http_examples -@startDocuBlock general_graph_vertex_delete_http_examples diff --git a/Documentation/Books/HTTP/Indexes/Fulltext.md b/Documentation/Books/HTTP/Indexes/Fulltext.md deleted file mode 100644 index 67b689f734b9..000000000000 --- a/Documentation/Books/HTTP/Indexes/Fulltext.md +++ /dev/null @@ -1,11 +0,0 @@ -Fulltext -======== - -If a [fulltext index](../../Manual/Appendix/Glossary.html#fulltext-index) exists, then -`/_api/simple/fulltext` will use this index to execute the specified fulltext query. 
- - -@startDocuBlock post_api_index_fulltext - - -@startDocuBlock put_api_simple_fulltext diff --git a/Documentation/Books/HTTP/Indexes/Geo.md b/Documentation/Books/HTTP/Indexes/Geo.md deleted file mode 100644 index 90f6d1866c6e..000000000000 --- a/Documentation/Books/HTTP/Indexes/Geo.md +++ /dev/null @@ -1,11 +0,0 @@ -Working with Geo Indexes -======================== - - -@startDocuBlock post_api_index_geo - - -@startDocuBlock put_api_simple_near - - -@startDocuBlock put_api_simple_within \ No newline at end of file diff --git a/Documentation/Books/HTTP/Indexes/Hash.md b/Documentation/Books/HTTP/Indexes/Hash.md deleted file mode 100644 index fa375f99e3df..000000000000 --- a/Documentation/Books/HTTP/Indexes/Hash.md +++ /dev/null @@ -1,14 +0,0 @@ -Working with Hash Indexes -========================= - -If a suitable hash index exists, then `/_api/simple/by-example` will use this -index to execute a query-by-example. - - -@startDocuBlock post_api_index_hash - - -@startDocuBlock put_api_simple_by_example - - -@startDocuBlock put_api_simple_first_example diff --git a/Documentation/Books/HTTP/Indexes/Persistent.md b/Documentation/Books/HTTP/Indexes/Persistent.md deleted file mode 100644 index 3a6d233a86ba..000000000000 --- a/Documentation/Books/HTTP/Indexes/Persistent.md +++ /dev/null @@ -1,8 +0,0 @@ -Working with Persistent Indexes -=============================== - -If a suitable persistent index exists, then `/_api/simple/range` and other operations -will use this index to execute queries. - - -@startDocuBlock post_api_index_persistent diff --git a/Documentation/Books/HTTP/Indexes/README.md b/Documentation/Books/HTTP/Indexes/README.md deleted file mode 100644 index 8c4bc8fcf58a..000000000000 --- a/Documentation/Books/HTTP/Indexes/README.md +++ /dev/null @@ -1,68 +0,0 @@ -HTTP Interface for Indexes -========================== - -Indexes -------- - -This is an introduction to ArangoDB's HTTP interface for indexes in -general. There are special sections for various index types. - -### Index - -Indexes are used to allow fast access to documents. For each collection there is always the primary index which is a hash index for the -[document key](../../Manual/Appendix/Glossary.html#document-key) (_key attribute). This index cannot be dropped or changed. -[edge collections](../../Manual/Appendix/Glossary.html#edge-collection) will also have an automatically created edges index, which cannot be modified. This index provides quick access to documents via the `_from` and `_to` attributes. - -Most user-land indexes can be created by defining the names of the attributes which should be indexed. Some index types allow indexing just one attribute (e.g. fulltext index) whereas other index types allow indexing multiple attributes. - -Using the system attribute `_id` in user-defined indexes is not supported by any index type. - -### Index Handle - -An index handle uniquely identifies an index in the database. It is a string and consists of a collection name and an index identifier separated by /. -If the index is declared unique, then access to the indexed attributes should be fast. The performance degrades if the indexed attribute(s) contain(s) only very few distinct values. - -### Primary Index - -A primary index is automatically created for each collections. It indexes the documents' primary keys, which are stored in the `_key` system attribute. The primary index is unique and can be used for queries on both the `_key` and `_id` attributes. -There is no way to explicitly create or delete primary indexes. 
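Unlike the primary index, user-defined indexes are created explicitly via the generic `/_api/index` endpoint. A minimal, hedged sketch — the collection name `products`, the indexed attribute and the host are illustrative assumptions:

```js
// Minimal sketch: create a user-defined hash index on the "name" attribute.
// Collection name, host and attribute are illustrative assumptions.
async function createHashIndex() {
  const response = await fetch(
    "http://localhost:8529/_db/_system/_api/index?collection=products",
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        type: "hash",       // index type (hash, skiplist, persistent, ttl, fulltext, ...)
        fields: ["name"],   // attribute path(s) to index
        unique: false,
        sparse: false
      })
    }
  );
  // The response describes the index, including its handle in the "id"
  // attribute (e.g. "products/63563528").
  return response.json();
}
```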
- -### Edge Index - -An edge index is automatically created for edge collections. It contains connections between vertex documents and is invoked when the connecting edges of a vertex are queried. There is no way to explicitly create or delete edge indexes. -The edge index is non-unique. - -### Hash Index - -A hash index is an unsorted index that can be used to find individual documents by equality lookups. - -### Skiplist Index - -A skiplist is a sorted index that can be used to find individual documents or ranges of documents. - -### Persistent Index - -A persistent index is a sorted index that can be used for finding individual documents or ranges of documents. -In constrast to the other indexes, the contents of a persistent index are stored on disk and thus do not need to be rebuilt in memory from the documents when the collection is loaded. - -### TTL (time-to-live) index - -The TTL index can be used for automatically removing expired documents from a collection. -Documents which are expired are eventually removed by a background thread. - -### Fulltext Index - -A fulltext index can be used to find words, or prefixes of words inside documents. A fulltext index can be set on one attribute only, and will index all words contained in documents that have a textual value in this attribute. Only words with a (specifiable) minimum length are indexed. Word tokenization is done using the word boundary analysis provided by libicu, which is taking into account the selected language provided at server start. Words are indexed in their lower-cased form. The index supports complete match queries (full words) and prefix queries. - -Address of an Index -------------------- - -All indexes in ArangoDB have an unique handle. This index handle identifies an -index and is managed by ArangoDB. All indexes are found under the URI - - http://server:port/_api/index/index-handle - -For example: Assume that the index handle is *demo/63563528* then the URL of -that index is: - - http://localhost:8529/_api/index/demo/63563528 diff --git a/Documentation/Books/HTTP/Indexes/Skiplist.md b/Documentation/Books/HTTP/Indexes/Skiplist.md deleted file mode 100644 index bdd225784e0f..000000000000 --- a/Documentation/Books/HTTP/Indexes/Skiplist.md +++ /dev/null @@ -1,8 +0,0 @@ -Working with Skiplist Indexes -============================= - -If a suitable skip-list index exists, then `/_api/simple/range` and other operations -will use this index to execute queries. 
- - -@startDocuBlock post_api_index_skiplist diff --git a/Documentation/Books/HTTP/Indexes/Ttl.md b/Documentation/Books/HTTP/Indexes/Ttl.md deleted file mode 100644 index 3cd3e441f43c..000000000000 --- a/Documentation/Books/HTTP/Indexes/Ttl.md +++ /dev/null @@ -1,4 +0,0 @@ -Working with TTL (time-to-live) Indexes -======================================= - -@startDocuBlock post_api_index_ttl diff --git a/Documentation/Books/HTTP/Indexes/WorkingWith.md b/Documentation/Books/HTTP/Indexes/WorkingWith.md deleted file mode 100644 index da7d5ea9df41..000000000000 --- a/Documentation/Books/HTTP/Indexes/WorkingWith.md +++ /dev/null @@ -1,14 +0,0 @@ -Working with Indexes using HTTP -=============================== - - -@startDocuBlock get_api_reads_index - - -@startDocuBlock post_api_index - - -@startDocuBlock post_api_index_delete - - -@startDocuBlock get_api_index \ No newline at end of file diff --git a/Documentation/Books/HTTP/MiscellaneousFunctions/README.md b/Documentation/Books/HTTP/MiscellaneousFunctions/README.md deleted file mode 100644 index def2fde1f166..000000000000 --- a/Documentation/Books/HTTP/MiscellaneousFunctions/README.md +++ /dev/null @@ -1,40 +0,0 @@ -HTTP Interface for Miscellaneous functions -========================================== - -This is an overview of ArangoDB's HTTP interface for miscellaneous functions. - - -@startDocuBlock get_api_return - - -@startDocuBlock get_engine - - -@startDocuBlock put_admin_wal_flush - - -@startDocuBlock get_admin_wal_properties - - -@startDocuBlock put_admin_wal_properties - - -@startDocuBlock get_admin_wal_transactions - - -@startDocuBlock get_admin_time - - -@startDocuBlock post_admin_echo - -@startDocuBlock get_admin_database_version - - -@startDocuBlock delete_api_shutdown - - -@startDocuBlock post_admin_execute - - -@startDocuBlock JSF_get_admin_status - diff --git a/Documentation/Books/HTTP/README.md b/Documentation/Books/HTTP/README.md deleted file mode 100644 index e76384b971ca..000000000000 --- a/Documentation/Books/HTTP/README.md +++ /dev/null @@ -1,21 +0,0 @@ -ArangoDB VERSION_NUMBER HTTP API Documentation -============================================== - -Welcome to the ArangoDB HTTP API documentation! This documentation is -for API developers. As a user or administrator of ArangoDB you should -not need the information provided herein. - -In general, as a user of ArangoDB you will use one of the language -[drivers](https://www.arangodb.com/arangodb-drivers/). - - -**Interactive Swagger documentation** - -Please note that your ArangoDB comes with an interactive version of -this documentation using [Swagger](https://swagger.io): - -![accessing the documentation via swagger](assets/swagger_serverapi_overview.png) - - -We also have a blog post explaining -[how to work with the Swagger API](https://www.arangodb.com/2018/03/using-arangodb-swaggerio-interactive-api-documentation/). diff --git a/Documentation/Books/HTTP/Repairs/README.md b/Documentation/Books/HTTP/Repairs/README.md deleted file mode 100644 index 9438ba9efa2d..000000000000 --- a/Documentation/Books/HTTP/Repairs/README.md +++ /dev/null @@ -1,228 +0,0 @@ -Repair Jobs -=========== - -distributeShardsLike --------------------- - -Before versions 3.2.12 and 3.3.4 there was a bug in the collection creation -which could lead to a violation of the property that its shards were -distributed on the DBServers exactly as the prototype collection from the -`distributeShardsLike` setting. 
- -**Please read everything carefully before using this API!** - -There is a job that can restore this property safely. However, while the -job is running, -- the `replicationFactor` *must not be changed* for any affected collection or - prototype collection (i.e. set in `distributeShardsLike`, including - [SmartGraphs](../../Manual/Graphs/SmartGraphs/index.html)), -- *neither should shards be moved* of one of those prototypes -- and shutdown of DBServers *should be avoided* -during the repairs. Also only one repair job should run at any given time. -Failure to meet those requirements will mostly cause the job to abort, but still -allow to restart it safely. However, changing the `replicationFactor` during -repairs may leave it in a state that is not repairable without manual -intervention! - -Shutting down the coordinator which executes the job will abort it, but it can -safely be restarted on another coordinator. However, there may still be a shard -move ongoing even after the job stopped. If the job is started again before the -move is finished, repairing the affected collection will fail, but the repair -can be restarted safely. - -If there is any affected collection which `replicationFactor` is equal to -the total number of DBServers, the repairs might abort. In this case, it is -necessary to reduce the `replicationFactor` by one (or add a DBServer). The -job will not do that automatically. - -Generally, the job will abort if any of its assumptions fail, at the start -or during the repairs. It can be started again and will resume from the -current state. - -### Testing with `GET /_admin/repairs/distributeShardsLike` - -Using `GET` will **not** trigger any repairs, but only calculate and return -the operations necessary to repair the cluster. This way, you can also -check if there is something to repair. - -``` -$ wget -qSO - http://localhost:8529/_admin/repair/distributeShardsLike | jq . - HTTP/1.1 200 OK - X-Content-Type-Options: nosniff - Server: ArangoDB - Connection: Keep-Alive - Content-Type: application/json; charset=utf-8 - Content-Length: 53 -{ - "error": false, - "code": 200, - "message": "Nothing to do." -} -``` - -In the example above, all collections with `distributeShardsLike` have their -shards distributed correctly. 
The response if something is broken looks like -this: - -```json -{ - "error": false, - "code": 200, - "collections": { - "_system/someCollection": { - "PlannedOperations": [ - { - "BeginRepairsOperation": { - "database": "_system", - "collection": "someCollection", - "distributeShardsLike": "aPrototypeCollection", - "renameDistributeShardsLike": true, - "replicationFactor": 4 - } - }, - { - "MoveShardOperation": { - "database": "_system", - "collection": "someCollection", - "shard": "s2000109", - "from": "PRMR-6b8c84be-1e80-4085-9065-177c6e31a702", - "to": "PRMR-d3e62c96-c3f7-4766-bac6-f3bf8026f59a", - "isLeader": false - } - }, - { - "MoveShardOperation": { - "database": "_system", - "collection": "someCollection", - "shard": "s2000109", - "from": "PRMR-ee3d7af6-1fbf-4ab7-bfd1-56d0a1c1c9b9", - "to": "PRMR-6b8c84be-1e80-4085-9065-177c6e31a702", - "isLeader": true - } - }, - { - "FixServerOrderOperation": { - "database": "_system", - "collection": "someCollection", - "distributeShardsLike": "aPrototypeCollection", - "shard": "s2000109", - "distributeShardsLikeShard": "s2000092", - "leader": "PRMR-6b8c84be-1e80-4085-9065-177c6e31a702", - "followers": [ - "PRMR-99c2ac17-f417-4710-82aa-8350417dd089", - "PRMR-3b0b85de-882b-4eb2-bbf2-ef1018bdc81e", - "PRMR-d3e62c96-c3f7-4766-bac6-f3bf8026f59a" - ], - "distributeShardsLikeFollowers": [ - "PRMR-d3e62c96-c3f7-4766-bac6-f3bf8026f59a", - "PRMR-99c2ac17-f417-4710-82aa-8350417dd089", - "PRMR-3b0b85de-882b-4eb2-bbf2-ef1018bdc81e" - ] - } - }, - { - "FinishRepairsOperation": { - "database": "_system", - "collection": "someCollection", - "distributeShardsLike": "aPrototypeCollection", - "shards": [ - { - "shard": "s2000109", - "protoShard": "s2000092", - "dbServers": [ - "PRMR-6b8c84be-1e80-4085-9065-177c6e31a702", - "PRMR-d3e62c96-c3f7-4766-bac6-f3bf8026f59a", - "PRMR-99c2ac17-f417-4710-82aa-8350417dd089", - "PRMR-3b0b85de-882b-4eb2-bbf2-ef1018bdc81e" - ] - }, - { - "shard": "s2000110", - "protoShard": "s2000093", - "dbServers": [ - "PRMR-d3e62c96-c3f7-4766-bac6-f3bf8026f59a", - "PRMR-ee3d7af6-1fbf-4ab7-bfd1-56d0a1c1c9b9", - "PRMR-6b8c84be-1e80-4085-9065-177c6e31a702", - "PRMR-99c2ac17-f417-4710-82aa-8350417dd089" - ] - }, -[...] - ] - } - } - ], - "error": false - } - } -} -``` - -If something is to be repaired, the response will have the property -`collections` with an entry `/` for each collection which -has to be repaired. Each collection also as a separate `error` property -which will be `true` iff an error occurred for this collection (and `false` -otherwise). If `error` is `true`, the properties `errorNum` and -`errorMessage` will also be set, and in some cases also `errorDetails` -with additional information on how to handle a specific error. - -### Repairing with `POST /_admin/repairs/distributeShardsLike` - -As this job possibly has to move a lot of data around, it can take a while -depending on the size of the affected collections. So this should *not -be called synchronously*, but only via -[Async Results](../../HTTP/AsyncResultsManagement/index.html): i.e., set the -header `x-arango-async: store` to put the job into background and get -its results later. Otherwise the request will most probably result in a -timeout and the response will be lost! The job will still continue unless -the coordinator is stopped, but there is no way to find out if it is -still running, or get success or error information afterwards. 
- -Starting the job in background can be done like so: - -``` -$ wget --method=POST --header='x-arango-async: store' -qSO - http://localhost:8529/_admin/repair/distributeShardsLike - HTTP/1.1 202 Accepted - X-Content-Type-Options: nosniff - X-Arango-Async-Id: 152223973119118 - Server: ArangoDB - Connection: Keep-Alive - Content-Type: text/plain; charset=utf-8 - Content-Length: 0 -``` - -This line is of notable importance: -``` - X-Arango-Async-Id: 152223973119118 -``` -as it contains the job id which can be used to fetch the state and results -of the job later. `GET`ting `/_api/job/pending` and `/_api/job/done` will list -job ids of jobs that are pending or done, respectively. - -This can also be done with the `GET` method for testing. - -The job api must be used to fetch the state and results. It will return -a `204` while the job is running. The actual response will be returned -only once, after that the job is deleted and the api will return a `404`. -It is therefore recommended to write the response directly to a file for -later inspection. Fetching the result is done by calling `/_api/job` via -`PUT`: - -``` -$ wget --method=PUT -qSO - http://localhost:8529/_api/job/152223973119118 | jq . - HTTP/1.1 200 OK - X-Content-Type-Options: nosniff - X-Arango-Async-Id: 152223973119118 - Server: ArangoDB - Connection: Keep-Alive - Content-Type: application/json; charset=utf-8 - Content-Length: 53 -{ - "error": false, - "code": 200, - "message": "Nothing to do." -} -``` - -The final response will look like the response of the `GET` call. -If an error occurred the response should contain details on how to proceed. -If in doubt, ask as on Slack: https://arangodb.com/community/ diff --git a/Documentation/Books/HTTP/Replications/OtherReplication.md b/Documentation/Books/HTTP/Replications/OtherReplication.md deleted file mode 100644 index c47e18cbd0ba..000000000000 --- a/Documentation/Books/HTTP/Replications/OtherReplication.md +++ /dev/null @@ -1,5 +0,0 @@ -Other Replication Commands -========================== - - -@startDocuBlock put_api_replication_serverID diff --git a/Documentation/Books/HTTP/Replications/README.md b/Documentation/Books/HTTP/Replications/README.md deleted file mode 100644 index e10f9c279122..000000000000 --- a/Documentation/Books/HTTP/Replications/README.md +++ /dev/null @@ -1,21 +0,0 @@ -HTTP Interface for Replication -============================== - -Replication ------------ - -This is an introduction to ArangoDB's HTTP replication interface. -The replication architecture and components are described in more details in -[Replication](../../Manual/Architecture/Replication/index.html). - -The HTTP replication interface serves four main purposes: -- fetch initial data from a server (e.g. for a backup, or for the initial synchronization - of data before starting the continuous replication applier) -- querying the state of a master -- fetch continuous changes from a master (used for incremental synchronization of changes) -- administer the replication applier (starting, stopping, configuring, querying state) on - a slave - -Please note that if a per-database setup is used (as opposed to server-level replication, -available since v3.3.0), then the replication system must be configured individually per -database, and replicating the data of multiple databases will require multiple operations. 
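As a minimal illustration of the second purpose listed above — querying the state of a master — the following sketch reads the logger state with the Fetch API. The host is a placeholder assumption and authentication is omitted; the individual endpoints are documented in the following chapters:

```js
// Minimal sketch: query the replication logger state of a master.
// The host is a placeholder assumption; authentication is omitted here.
async function getLoggerState() {
  const response = await fetch(
    "http://localhost:8529/_db/_system/_api/replication/logger-state"
  );
  const result = await response.json();
  // result.state.lastLogTick is the tick a client could resume tailing from,
  // result.state.running indicates whether the logger is active.
  console.log(result.state.running, result.state.lastLogTick);
  return result;
}
```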
diff --git a/Documentation/Books/HTTP/Replications/ReplicationApplier.md b/Documentation/Books/HTTP/Replications/ReplicationApplier.md deleted file mode 100644 index bd4e61d064aa..000000000000 --- a/Documentation/Books/HTTP/Replications/ReplicationApplier.md +++ /dev/null @@ -1,24 +0,0 @@ -Replication Applier Commands -============================ - -The applier commands allow to remotely start, stop, and query the state and -configuration of an ArangoDB database's replication applier. - - -@startDocuBlock put_api_replication_applier - - -@startDocuBlock put_api_replication_applier_adjust - - -@startDocuBlock put_api_replication_applier_start - - -@startDocuBlock put_api_replication_applier_stop - - -@startDocuBlock get_api_replication_applier_state - - -@startDocuBlock put_api_replication_makeSlave - diff --git a/Documentation/Books/HTTP/Replications/ReplicationDump.md b/Documentation/Books/HTTP/Replications/ReplicationDump.md deleted file mode 100644 index 21ab69cc9c71..000000000000 --- a/Documentation/Books/HTTP/Replications/ReplicationDump.md +++ /dev/null @@ -1,46 +0,0 @@ -Replication Dump Commands -========================= - -The *inventory* method can be used to query an ArangoDB database's current -set of collections plus their indexes. Clients can use this method to get an -overview of which collections are present in the database. They can use this information -to either start a full or a partial synchronization of data, e.g. to initiate a backup -or the incremental data synchronization. - - -@startDocuBlock put_api_replication_inventory - - -The *batch* method will create a snapshot of the current state that then can be -dumped. A batchId is required when using the dump api with rocksdb. - -@startDocuBlock post_batch_replication - -@startDocuBlock delete_batch_replication - -@startDocuBlock put_batch_replication - - -The *dump* method can be used to fetch data from a specific collection. As the -results of the dump command can be huge, *dump* may not return all data from a collection -at once. Instead, the dump command may be called repeatedly by replication clients -until there is no more data to fetch. The dump command will not only return the -current documents in the collection, but also document updates and deletions. - -Please note that the *dump* method will only return documents, updates and deletions -from a collection's journals and datafiles. Operations that are stored in the write-ahead -log only will not be returned. In order to ensure that these operations are included -in a dump, the write-ahead log must be flushed first. - -To get to an identical state of data, replication clients should apply the individual -parts of the dump results in the same order as they are provided. - - -@startDocuBlock get_api_replication_dump - - - -@startDocuBlock put_api_replication_synchronize - - -@startDocuBlock get_api_replication_cluster_inventory diff --git a/Documentation/Books/HTTP/Replications/ReplicationLogger.md b/Documentation/Books/HTTP/Replications/ReplicationLogger.md deleted file mode 100644 index 5dedcc8fd857..000000000000 --- a/Documentation/Books/HTTP/Replications/ReplicationLogger.md +++ /dev/null @@ -1,33 +0,0 @@ -Replication Logger Commands -=========================== - -Previous versions of ArangoDB allowed starting, stopping and configuring the -replication logger. These commands are superfluous in ArangoDB 2.2 as all -data-modification operations are written to the server's write-ahead log and are -not handled by a separate logger anymore. 
- -The only useful operations remaining since ArangoDB 2.2 are to query the current state -of the logger and to fetch the latest changes written by the logger. The operations -will return the state and data from the write-ahead log. - - -@startDocuBlock get_api_replication_logger_return_state - -To query the latest changes logged by the replication logger, the HTTP interface -also provides the `logger-follow` method. - -This method should be used by replication clients to incrementally fetch updates -from an ArangoDB database. - - -@startDocuBlock get_api_replication_logger_follow - -To check what range of changes is available (identified by tick values), the HTTP -interface provides the methods `logger-first-tick` and `logger-tick-ranges`. -Replication clients can use the methods to determine if certain data (identified -by a tick *date*) is still available on the master. - -@startDocuBlock get_api_replication_logger_first_tick - -@startDocuBlock get_api_replication_logger_tick_ranges - diff --git a/Documentation/Books/HTTP/Replications/WALAccess.md b/Documentation/Books/HTTP/Replications/WALAccess.md deleted file mode 100644 index 3ec19ea4524a..000000000000 --- a/Documentation/Books/HTTP/Replications/WALAccess.md +++ /dev/null @@ -1,361 +0,0 @@ -WAL Access API -=========================== - -The WAL Access API is used from 3.3 onwards to facilitate faster and -more reliable asynchronous replication. The API offers access to the -write-ahead log or operations log of the ArangoDB server. As a public -API it is only supported to access these REST endpoints on a single-server -instance. While these APIs are also available on DBServer instances, accessing them -as a user is not supported. This API replaces some of the APIs in `/_api/replication`. - - -@startDocuBlock get_api_wal_access_range - -@startDocuBlock get_api_wal_access_last_tick - -@startDocuBlock get_api_wal_access_tail - -Operation Types ----------------- - -There are several different operation types thar an ArangoDB server might print. -All operations include a `tick` value which identified their place in the operations log. -The numeric fields _tick_ and _tid_ always contain stringified numbers to avoid problems with -drivers where numbers in JSON might be mishandled. - -The following operation types are used in ArangoDB: - -### Create Database (1100) - -Create a database. Contains the field _db_ with the database name and the field _data_, -contains the database definition. -```json -{ - "tick": "2103", - "type": 1100, - "db": "test", - "data": { - "database": 337, - "id": "337", - "name": "test" - } -} -``` - -### Drop Database (1100) - -Drop a database. Contains the field _db_ with the database name. -```json -{ - "tick": "3453", - "type": 1101, - "db": "test" -} -``` - -### Create Collection (2000) - -Create a collection. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. The *data* attribute contains the collection definition. - -```json -{ - "tick": "3702", - "db": "_system", - "cuid": "hC0CF79DA83B4/555", - "type": 2000, - "data": { - "allowUserKeys": true, - "cacheEnabled": false, - "cid": "555", - "deleted": false, - "globallyUniqueId": "hC0CF79DA83B4/555", - "id": "555", - "indexes": [], - "isSystem": false, - "keyOptions": { - "allowUserKeys": true, - "lastValue": 0, - "type": "traditional" - }, - "name": "test" - } -} -``` - -### Drop Collection (2001) - -Drop a collection. 
Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. - -```json -{ - "tick": "154", - "type": 2001, - "db": "_system", - "cuid": "hD15F8FE99859/555" -} -``` - -### Rename Collection (2002) - -Rename a collection. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. The _data_ field contains the *name* field -with the new name - -```json -{ - "tick": "385", - "db": "_system", - "cuid": "hD15F8FE99859/135", - "type": 2002, - "data": { - "name": "other" - } -} -``` - -### Change Collection (2003) - -Change collection properties. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. The *data* attribute contains the updated collection definition. - -```json -{ - "tick": "154", - "type": 2003, - "db": "_system", - "cuid": "hD15F8FE99859/555", - "data": { - "waitForSync": true - } -} -``` - -### Truncate Collection (2004) - -Truncate a collection. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. - -```json -{ - "tick": "154", - "type": 2004, - "db": "_system", - "cuid": "hD15F8FE99859/555" -} -``` - -### Create Index (2100) - -Create an index. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. The field _data_ contains the index -definition. - -```json -{ - "tick": "1327", - "type": 2100, - "db": "_system", - "cuid": "hD15F8FE99859/555", - "data": { - "deduplicate": true, - "fields": [ - "value" - ], - "id": "260", - "selectivityEstimate": 1, - "sparse": false, - "type": "skiplist", - "unique": false - } -} -``` - -### Drop Index (2101) - -Drop an index. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this collection. The field _data_ contains the field -*id* with the index id. - -```json -{ - "tick": "1522", - "type": 2101, - "db": "_system", - "cuid": "hD15F8FE99859/555", - "data": { - "id": "260" - } -} -``` - -### Create View (2110) - -Create a view. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this view. The field _data_ contains the view definition - -```json -{ - "tick": "1833", - "type": 2110, - "db": "_system", - "cuid": "hD15F8FE99859/322", - "data": { - "cleanupIntervalStep": 10, - "collections": [], - "commitIntervalMsec": 60000, - "consolidate": { - "segmentThreshold": 300, - "threshold": 0.8500000238418579, - "type": "bytes_accum" - }, - "deleted": false, - "globallyUniqueId": "hD15F8FE99859/322", - "id": "322", - "isSystem": false, - "locale": "C", - "name": "myview", - "type": "arangosearch" - } -} -``` - -### Drop View (2111) - -Drop a view. Contains the field _db_ with the database name, and _cuid_ with the -globally unique id to identify this view. - -```json -{ - "tick": "3113", - "type": 2111, - "db": "_system", - "cuid": "hD15F8FE99859/322" -} -``` - -### Change View (2112) - -Change view properties (including the name). Contains the field _db_ with the database name and _cuid_ with the -globally unique id to identify this view. The *data* attribute contain the updated properties. 
- -```json -{ - "tick": "3014", - "type": 2112, - "db": "_system", - "cuid": "hD15F8FE99859/457", - "data": { - "cleanupIntervalStep": 10, - "collections": [ - 135 - ], - "commitIntervalMsec": 60000, - "consolidate": { - "segmentThreshold": 300, - "threshold": 0.8500000238418579, - "type": "bytes_accum" - }, - "deleted": false, - "globallyUniqueId": "hD15F8FE99859/457", - "id": "457", - "isSystem": false, - "locale": "C", - "name": "renamedview", - "type": "arangosearch" - } -} -``` - -### Start Transaction (2200) - -Mark the beginning of a transaction. Contains the field _db_ with the database name -and the field _tid_ for the transaction id. This log entry might be followed -by zero or more document operations and then either one commit **or** an abort operation -(i.e. types *2300*, *2302* and *2201* / *2202*) with the same _tid_ value. - -```json -{ - "tick": "3651", - "type": 2200, - "db": "_system", - "tid": "556" -} -``` - -### Commit Transaction (2201) - -Mark the successful end of a transaction. Contains the field _db_ with the database name -and the field _tid_ for the transaction id. - -```json -{ - "tick": "3652", - "type": 2201, - "db": "_system", - "tid": "556" -} -``` - -### Abort Transaction (2202) - -Mark the abortion of a transaction. Contains the field _db_ with the database name -and the field _tid_ for the transaction id. - -```json -{ - "tick": "3654", - "type": 2202, - "db": "_system", - "tid": "556" -} -``` - -### Insert / Replace Document (2300) - -Insert or replace a document. Contains the field _db_ with the database name, -_cuid_ with the globally unique id to identify the collection and the field _tid_ for -the transaction id. The field *tid* might contain the value *"0"* to identify a single -operation that is not part of a multi-document transaction. The field *data* contains the -document. If the field *_rev* exists the client can choose to perform a revision check against -a locally available version of the document to ensure consistency. - -```json -{ - "tick": "196", - "type": 2300, - "db": "_system", - "tid": "0", - "cuid": "hE0E3D7BE511D/119", - "data": { - "_id": "users/194", - "_key": "194", - "_rev": "_XUJFD3C---", - "value": "test" - } -} -``` - -### Remove Document (2302) - -Remove a document. Contains the field _db_ with the database name, -_cuid_ with the globally unique id to identify the collection and the field _tid_ for -the transaction id. The field *tid* might contain the value *"0"* to identify a single -operation that is not part of a multi-document transaction. The field *data* contains the -*_key* and *_rev* of the removed document. The client can choose to perform a revision check against -a locally available version of the document to ensure consistency. 
- -```json -{ - "cuid": "hE0E3D7BE511D/119", - "data": { - "_key": "194", - "_rev": "_XUJIbS---_" - }, - "db": "_system", - "tick": "397", - "tid": "0", - "type": 2302 -} -``` diff --git a/Documentation/Books/HTTP/SUMMARY.md b/Documentation/Books/HTTP/SUMMARY.md deleted file mode 100644 index 1460944b92c3..000000000000 --- a/Documentation/Books/HTTP/SUMMARY.md +++ /dev/null @@ -1,78 +0,0 @@ - -# Summary -* [Introduction](README.md) -* [General HTTP Handling](General/README.md) -* [HTTP Interface](Api/README.md) -* [Databases](Database/README.md) - * [To-Endpoint](Database/DatabaseEndpoint.md) - * [Management](Database/DatabaseManagement.md) - * [Notes on Databases](Database/NotesOnDatabases.md) -* [Collections](Collection/README.md) - * [Creating](Collection/Creating.md) - * [Getting Information](Collection/Getting.md) - * [Modifying](Collection/Modifying.md) -* [Documents](Document/README.md) - * [Basics and Terminology](Document/AddressAndEtag.md) - * [Working with Documents](Document/WorkingWithDocuments.md) -* [Edges](Edge/README.md) - * [Address and Etag](Edge/AddressAndEtag.md) - * [Working with Edges](Edge/WorkingWithEdges.md) -* [General Graph](Gharial/README.md) - * [Management](Gharial/Management.md) - * [Vertices](Gharial/Vertices.md) - * [Edges](Gharial/Edges.md) -* [Traversals](Traversal/README.md) -* [AQL Query Cursors](AqlQueryCursor/README.md) - * [Query Results](AqlQueryCursor/QueryResults.md) - * [Accessing Cursors](AqlQueryCursor/AccessingCursors.md) -* [AQL Queries](AqlQuery/README.md) -* [AQL Query Results Cache](AqlQueryCache/README.md) -* [AQL User Functions Management](AqlUserFunctions/README.md) -* [Simple Queries](SimpleQuery/README.md) -* [Async Result Handling](AsyncResultsManagement/README.md) -* [Bulk Import / Export](BulkImports/README.md) - * [JSON Documents](BulkImports/ImportingSelfContained.md) - * [Headers & Values](BulkImports/ImportingHeadersAndValues.md) - * [Batch Requests](BatchRequest/README.md) - * [Exporting data](Export/README.md) -* [Indexes](Indexes/README.md) - * [Working with Indexes](Indexes/WorkingWith.md) - * [Hash](Indexes/Hash.md) - * [Skiplist](Indexes/Skiplist.md) - * [Persistent](Indexes/Persistent.md) - * [TTL](Indexes/Ttl.md) - * [Geo-Spatial](Indexes/Geo.md) - * [Fulltext](Indexes/Fulltext.md) -* [Views](Views/README.md) - * [Creating](Views/Creating.md) - * [Deleting](Views/Dropping.md) - * [Modifying](Views/Modifying.md) - * [Retrieving](Views/Getting.md) - * [ArangoSearch Views](Views/ArangoSearch.md) -* [Analyzers](Analyzers/README.md) -* [Transactions](Transaction/README.md) - * [Stream Transactions](Transaction/StreamTransaction.md) - * [JavaScript Transactions](Transaction/JsTransaction.md) -* [Replication](Replications/README.md) - * [Replication Dump](Replications/ReplicationDump.md) - * [Replication Logger](Replications/ReplicationLogger.md) - * [Replication Applier](Replications/ReplicationApplier.md) - * [Other Replication Commands](Replications/OtherReplication.md) - * [Write-Ahead Log](Replications/WALAccess.md) -* [Administration & Monitoring](AdministrationAndMonitoring/README.md) -* [Endpoints](Endpoints/README.md) -* [Foxx Services](Foxx/README.md) - * [Management](Foxx/Management.md) - * [Configuration](Foxx/Configuration.md) - * [Miscellaneous](Foxx/Miscellaneous.md) -* [User Management](UserManagement/README.md) -* [Tasks](Tasks/README.md) -* [Cluster](Cluster/README.md) - * [Server ID](Cluster/ServerId.md) - * [Server Role](Cluster/ServerRole.md) - * [Cluster Statistics](Cluster/Statistics.md) - * [Cluster 
Health](Cluster/Health.md) - * [Cluster Maintenance](Cluster/Maintenance.md) - * [Agency](Agency/README.md) -* [Miscellaneous functions](MiscellaneousFunctions/README.md) -* [Repair Jobs](Repairs/README.md) diff --git a/Documentation/Books/HTTP/SimpleQuery/README.md b/Documentation/Books/HTTP/SimpleQuery/README.md deleted file mode 100644 index e66f20792ec6..000000000000 --- a/Documentation/Books/HTTP/SimpleQuery/README.md +++ /dev/null @@ -1,77 +0,0 @@ -HTTP Interface for Simple Queries -================================= - -{% hint 'warning' %} -The Simple Queries API is deprecated from version 3.4.0 on. -These endpoints should no longer be used. -They are superseded by AQL queries. -{% endhint %} - -Simple Queries --------------- - -This is an introduction to ArangoDB's HTTP interface for simple queries. - -Simple queries can be used if the query condition is straight forward simple, -i.e., a document reference, all documents, a query-by-example, or a simple geo -query. In a simple query you can specify exactly one collection and one -condition. The result can then be sorted and can be split into pages. - -Working with Simples Queries using HTTP ---------------------------------------- - -To limit the amount of results to be transferred in one batch, simple queries -support a *batchSize* parameter that can optionally be used to tell the server -to limit the number of results to be transferred in one batch to a certain -value. If the query has more results than were transferred in one go, more -results are waiting on the server so they can be fetched subsequently. If no -value for the *batchSize* parameter is specified, the server will use a -reasonable default value. - -If the server has more documents than should be returned in a single batch, the -server will set the *hasMore* attribute in the result. It will also return the -id of the server-side cursor in the *id* attribute in the result. This id can -be used with the cursor API to fetch any outstanding results from the server and -dispose the server-side cursor afterwards. - - -@startDocuBlock put_api_simple_all - - -@startDocuBlock put_api_simple_by_example - - -@startDocuBlock put_api_simple_first_example - - -@startDocuBlock RestLookupByKeys - - -@startDocuBlock put_api_simple_any - - -@startDocuBlock RestRemoveByKeys - - -@startDocuBlock put_api_simple_remove_by_example - - -@startDocuBlock put_api_simple_replace_by_example - - -@startDocuBlock put_api_simple_update_by_example - - -@startDocuBlock put_api_simple_range - - -@startDocuBlock put_api_simple_near - - -@startDocuBlock put_api_simple_within - - -@startDocuBlock put_api_simple_within_rectangle - - -@startDocuBlock put_api_simple_fulltext diff --git a/Documentation/Books/HTTP/Tasks/README.md b/Documentation/Books/HTTP/Tasks/README.md deleted file mode 100644 index 835f161596a8..000000000000 --- a/Documentation/Books/HTTP/Tasks/README.md +++ /dev/null @@ -1,16 +0,0 @@ -HTTP tasks Interface -==================== - -Following you have ArangoDB's HTTP Interface for Tasks. - -There are also some examples provided for every API action. 
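To make the request shape concrete, here is a minimal sketch (not taken from the original manual) of registering and listing a periodic task against the tasks endpoint. The task name, the JavaScript snippet in `command`, the 60 second period, and the host/port are placeholder assumptions; authentication options are omitted.

```bash
# Sketch: register a server-side task that runs every 60 seconds
# (hypothetical task name and command; adjust host, port and credentials).
curl -X POST http://localhost:8529/_api/tasks \
  --data '{
    "name": "example-task",
    "command": "console.log(\"hello from task\");",
    "period": 60
  }'

# List all currently registered tasks.
curl http://localhost:8529/_api/tasks
```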
- -@startDocuBlock get_api_tasks_all - -@startDocuBlock get_api_tasks - -@startDocuBlock post_api_new_tasks - -@startDocuBlock put_api_new_tasks - -@startDocuBlock delete_api_tasks diff --git a/Documentation/Books/HTTP/Transaction/JsTransaction.md b/Documentation/Books/HTTP/Transaction/JsTransaction.md deleted file mode 100644 index acf04a369080..000000000000 --- a/Documentation/Books/HTTP/Transaction/JsTransaction.md +++ /dev/null @@ -1,24 +0,0 @@ -HTTP Interface for JavaScript Transactions -========================================== - -ArangoDB's JS-transactions are executed on the server. Transactions can be -initiated by clients by sending the transaction description for execution to -the server. - -JS-Transactions in ArangoDB do not offer separate *BEGIN*, *COMMIT* and *ROLLBACK* -operations. Instead, ArangoDB JS-transactions are described by a JavaScript function, -and the code inside the JavaScript function will then be executed transactionally. - -At the end of the function, the transaction is automatically committed, and all -changes done by the transaction will be persisted. If an exception is thrown -during transaction execution, all operations performed in the transaction are -rolled back. - -For a more detailed description of how transactions work in ArangoDB please -refer to [Transactions](../../Manual/Transactions/index.html). - - - -@startDocuBlock post_api_transaction - - diff --git a/Documentation/Books/HTTP/Transaction/README.md b/Documentation/Books/HTTP/Transaction/README.md deleted file mode 100644 index cc106dec3cde..000000000000 --- a/Documentation/Books/HTTP/Transaction/README.md +++ /dev/null @@ -1,36 +0,0 @@ -HTTP Interface for Transactions -=============================== - -### Transactions - -ArangoDB's transactions are executed on the server. Transactions can be -executed by clients in two different ways: - -1. Via the [Stream Transaction](StreamTransaction.md) API -2. Via the [JavaScript Transaction](JsTransaction.md) API - -The difference between these two is not difficult to understand, a short primer -is listed below. -For a more detailed description of how transactions work in ArangoDB and -what guarantees ArangoDB can deliver please -refer to [Transactions](../../Manual/Transactions/index.html). - - -### Stream Transactions - -[Stream Transactions](StreamTransaction.md) allow you to perform a multi-document transaction -with individual begin and commit / abort commands. This is similar to -the way traditional RDBMS do it with *BEGIN*, *COMMIT* and *ROLLBACK* operations. - -This the recommended API for larger transactions. However the client is responsible -for making sure that the transaction is committed or aborted when it is no longer needed, -to avoid taking up resources. - -### JavaScript Transactions - -[JS-Transactions](JsTransaction.md) allow you to send the server -a dedicated piece of JavaScript code (i.e. a function), which will be executed transactionally. - -At the end of the function, the transaction is automatically committed, and all -changes done by the transaction will be persisted. No interaction is required by -the client beyond the initial start request. 
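As a rough illustration of the JavaScript transaction request described above, the following sketch submits a transaction function for server-side execution. The collection name `accounts`, the inserted document, and the endpoint are placeholder assumptions; authentication is omitted.

```bash
# Sketch: execute a JavaScript transaction in one request
# (hypothetical "accounts" collection; the function runs on the server).
curl -X POST http://localhost:8529/_db/_system/_api/transaction \
  --data '{
    "collections": { "write": ["accounts"] },
    "action": "function () { var db = require(\"@arangodb\").db; db.accounts.insert({ amount: 10 }); return db.accounts.count(); }"
  }'
```

If the function throws, all of its writes are rolled back; otherwise the transaction commits automatically when the function returns.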
diff --git a/Documentation/Books/HTTP/Transaction/StreamTransaction.md b/Documentation/Books/HTTP/Transaction/StreamTransaction.md deleted file mode 100644 index a465ecac67a2..000000000000 --- a/Documentation/Books/HTTP/Transaction/StreamTransaction.md +++ /dev/null @@ -1,64 +0,0 @@ -HTTP Interface for Stream Transactions -====================================== - -*Stream Transactions* allow you to perform a multi-document transaction -with individual begin and commit / abort commands. This is similar to -the way traditional RDBMS do it with *BEGIN*, *COMMIT* and *ROLLBACK* operations. - -To use a stream transaction a client first sends the (configuration)[#begin-a-transaction] -of the transaction to the ArangoDB server. - -{% hint 'info' %} -Contrary to the [JS-Transaction](JsTransaction.md) the definition of this -transaction must only contain the collections which are going to be used -and (optionally) the various transaction options supported by ArangoDB. -No *action* attribute is supported. -{% endhint %} - -The stream transaction API works in *conjunction* with other APIs in ArangoDB. -To use the transaction for a supported operation a client needs to specify -the transaction identifier in the *x-arango-trx-id* header on each request. -This will automatically cause these operations to use the specified transaction. - -Supported transactional API operations include: - -1. All operations in the [Document API](../Document/WorkingWithDocuments.md) -2. Number of documents via the [Collection API](../Collection/Getting.md#return-number-of-documents-in-a-collection) -3. Truncate a collection via the [Collection API](../Collection/Getting.md#return-number-of-documents-in-a-collection) -4. Create an AQL cursor via the [Cursor API](../AqlQueryCursor/AccessingCursors.md) - -Note that a client *always needs to start the transaction first* and it is required to -explicitly specify the collections used for write accesses. The client is responsible -for making sure that the transaction is committed or aborted when it is no longer needed. -This avoids taking up resources on the ArangoDB server. - -For a more detailed description of how transactions work in ArangoDB please -refer to [Transactions](../../Manual/Transactions/index.html). - -Begin a Transaction -------------------- - - - -@startDocuBlock post_api_transaction_begin - -Check Status of a Transaction ------------------------------ - -@startDocuBlock get_api_transaction - -Commit or Abort a Transaction ------------------------------ - -Committing or aborting a running transaction must be done by the client. -It is *bad practice* to not commit or abort a transaction once you are done -using it. It will force the server to keep resources and collection locks -until the entire transaction times out. - - - -@startDocuBlock put_api_transaction - - - -@startDocuBlock delete_api_transaction diff --git a/Documentation/Books/HTTP/Traversal/README.md b/Documentation/Books/HTTP/Traversal/README.md deleted file mode 100644 index 77cce1fad527..000000000000 --- a/Documentation/Books/HTTP/Traversal/README.md +++ /dev/null @@ -1,28 +0,0 @@ -HTTP Interface for Traversals -============================= - -{% hint 'warning' %} -The API endpoint `/_api/traversal` is deprecated from version 3.4.0 on. -The preferred way to traverse graphs is with AQL. -{% endhint %} - -ArangoDB's graph traversals are executed on the server. Traversals can be -initiated by clients by sending the traversal description for execution to -the server. 
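To show what such a traversal description might look like, here is a minimal sketch of a request to the (deprecated) `/_api/traversal` endpoint. The start vertex `persons/alice`, the graph name `knows_graph`, and the depth limit are placeholder assumptions; AQL traversals are the recommended replacement.

```bash
# Sketch: outbound traversal over an example graph, limited to depth 2
# (hypothetical vertex and graph names; endpoint deprecated since 3.4).
curl -X POST http://localhost:8529/_api/traversal \
  --data '{
    "startVertex": "persons/alice",
    "graphName": "knows_graph",
    "direction": "outbound",
    "maxDepth": 2
  }'
```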
- -Traversals in ArangoDB are used to walk over a graph stored in one -[edge collection](../../Manual/Appendix/Glossary.html#edge-collection). -It can easily be described which edges of the graph should be followed -and which actions should be performed on each visited vertex. -Furthermore the ordering of visiting the nodes can be -specified, for instance depth-first or breadth-first search -are offered. - -Executing Traversals via HTTP ------------------------------ - -@startDocuBlock HTTP_API_TRAVERSAL - -All examples were using this graph: - -![Persons relation Example Graph](knows_graph.png) diff --git a/Documentation/Books/HTTP/Traversal/knows_graph.png b/Documentation/Books/HTTP/Traversal/knows_graph.png deleted file mode 100644 index 621ce17f5a5c..000000000000 Binary files a/Documentation/Books/HTTP/Traversal/knows_graph.png and /dev/null differ diff --git a/Documentation/Books/HTTP/UserManagement/README.md b/Documentation/Books/HTTP/UserManagement/README.md deleted file mode 100644 index 03033aea30bb..000000000000 --- a/Documentation/Books/HTTP/UserManagement/README.md +++ /dev/null @@ -1,27 +0,0 @@ -HTTP Interface for User Management -================================== - -This is an introduction to ArangoDB's HTTP interface for managing users. - -The interface provides a simple means to add, update, and remove users. All -users managed through this interface will be stored in the system collection -*_users*. You should never manipulate the *_users* collection directly. - -This specialized interface intentionally does not provide all functionality that -is available in the regular document REST API. - -Please note that user operations are not included in ArangoDB's replication. - -@startDocuBlock UserHandling_create -@startDocuBlock UserHandling_grantDatabase -@startDocuBlock UserHandling_grantCollection -@startDocuBlock UserHandling_revokeDatabase -@startDocuBlock UserHandling_revokeCollection -@startDocuBlock UserHandling_fetchDatabaseList -@startDocuBlock UserHandling_fetchDatabasePermission -@startDocuBlock UserHandling_fetchCollectionPermission -@startDocuBlock UserHandling_replace -@startDocuBlock UserHandling_modify -@startDocuBlock UserHandling_delete -@startDocuBlock UserHandling_fetch -@startDocuBlock UserHandling_fetchProperties diff --git a/Documentation/Books/HTTP/Views/ArangoSearch.md b/Documentation/Books/HTTP/Views/ArangoSearch.md deleted file mode 100644 index 80b51c0d0c55..000000000000 --- a/Documentation/Books/HTTP/Views/ArangoSearch.md +++ /dev/null @@ -1,37 +0,0 @@ -ArangoSearch View -================= - -A natively integrated AQL extension that allows one to: - * evaluate together documents located in different collections - * search documents based on AQL boolean expressions and functions - * sort the result set based on how closely each document matched the search condition - -### Creating an ArangoSearch View - -The ArangoSearch specific JSON definition for creating of a view is as follows: - - -@startDocuBlock post_api_view_iresearch - -### Modifying an ArangoSearch View - -The ArangoSearch specific JSON definition for modification of a view is as -follows: - -Update of All Possible Properties ---------------------------------- - -All modifiable properties of a view may be set to the specified definition, -(i.e. 
"make the view exactly like *this*"), via: - - -@startDocuBlock put_api_view_properties_iresearch - -Update of Specific Properties (delta) -------------------------------------- - -Specific modifiable properties of a view may be set to the specified values, -(i.e. "change only these properties to the specified values"), via: - - -@startDocuBlock patch_api_view_properties_iresearch diff --git a/Documentation/Books/HTTP/Views/Creating.md b/Documentation/Books/HTTP/Views/Creating.md deleted file mode 100644 index f5725d08fd27..000000000000 --- a/Documentation/Books/HTTP/Views/Creating.md +++ /dev/null @@ -1,11 +0,0 @@ -Creating Views -============== - -The JSON definition for creating of a view is implementation dependant and -varies for each supported view type. Please refer to the proper section of -the required view type for details. - -However, in general the format is the following: - - -@startDocuBlock post_api_view diff --git a/Documentation/Books/HTTP/Views/Dropping.md b/Documentation/Books/HTTP/Views/Dropping.md deleted file mode 100644 index 493cf846a17e..000000000000 --- a/Documentation/Books/HTTP/Views/Dropping.md +++ /dev/null @@ -1,8 +0,0 @@ -Deleting Views -============== - -Views, just as collections, can be removed from a database. View removal is -achieved via an API common to all view types, as follows: - - -@startDocuBlock delete_api_view diff --git a/Documentation/Books/HTTP/Views/Getting.md b/Documentation/Books/HTTP/Views/Getting.md deleted file mode 100644 index b3b6fa29ec5f..000000000000 --- a/Documentation/Books/HTTP/Views/Getting.md +++ /dev/null @@ -1,26 +0,0 @@ -Getting Information about a View -================================ - -### View Listing - -A listing of all views in a database, regardless of their type, may be obtained -via: - - -@startDocuBlock get_api_views - -### Basic View Information - -Basic view information, common to all view types, for a specific view may be -obtained via: - - -@startDocuBlock get_api_view_name - -### Full View Information - -A full description, populated with additional properties depending on view -type, for a specific view may be obtained via: - - -@startDocuBlock get_api_view_properties diff --git a/Documentation/Books/HTTP/Views/Modifying.md b/Documentation/Books/HTTP/Views/Modifying.md deleted file mode 100644 index 9a8732c4ad65..000000000000 --- a/Documentation/Books/HTTP/Views/Modifying.md +++ /dev/null @@ -1,37 +0,0 @@ -Modifying a View -================ - -### Renaming a View - -Views, just as collections, can be renamed. View rename is achieved via an API -common to all view types, as follows: - - -@startDocuBlock put_api_view_rename - -### Modifying View Properties - -Some view types allow run-time modification of internal properties. Which, if -any properties are modifiable is implementation dependant and varies for each -supported view type. Please refer to the proper section of the required view -type for details. - -However, in general the format is the following: - -Update of All Possible Properties ---------------------------------- - -All modifiable properties of a view may be set to the specified definition, -(i.e. "make the view exactly like *this*"), via: - - -@startDocuBlock put_api_view_properties - -Update of Specific Properties (delta) -------------------------------------- - -Specific modifiable properties of a view may be set to the specified values, -(i.e. 
"change only these properties to the specified values"), via: - - -@startDocuBlock patch_api_view_properties diff --git a/Documentation/Books/HTTP/Views/README.md b/Documentation/Books/HTTP/Views/README.md deleted file mode 100644 index b60453d76573..000000000000 --- a/Documentation/Books/HTTP/Views/README.md +++ /dev/null @@ -1,65 +0,0 @@ -HTTP Interface for Views -======================== - -Views ------ - -This is an introduction to ArangoDB's HTTP interface for views. - -### View - -A view consists of documents. It is uniquely identified by its -identifier. -It also has a unique name that clients should -use to identify and access it. View can be renamed. This will -change the view name, but not the view identifier. -Views have a type that is specified by the user when the view -is created. - -The only available view type currently is: [ArangoSearch](ArangoSearch.md). - -### View Identifier - -A view identifier lets you refer to a view in a database. -It is a string value and is unique within the database. -ArangoDB currently uses 64bit unsigned integer values to maintain -view ids internally. When returning view ids to clients, -ArangoDB will put them into a string to ensure the view id is not -clipped by clients that do not support big integers. Clients should treat -the view ids returned by ArangoDB as opaque strings when they store -or use them locally. - -### View Name - -A view name identifies a view in a database. It is a string -and is unique within the database. Unlike the view identifier it is -supplied by the creator of the view . The view name must consist -of letters, digits, and the _ (underscore) and - (dash) characters only. -Please refer to Naming Conventions in ArangoDB for more information on valid -view names. - -Address of a View ------------------ - -All views in ArangoDB have a unique identifier and a unique -name. ArangoDB internally uses the view's unique identifier to -look up the view. This identifier however is managed by ArangoDB -and the user has no control over it. In order to allow users to use -their own names, each view also has a unique name, which is specified -by the user. 
To access a view from the user perspective, the -view name should be used, i.e.: - - http://server:port/_api/view/view-name - -For example: Assume that the view identifier is *7254820* and -the view name is *demo*, then the URL of that view is: - - http://localhost:8529/_api/view/demo - -### View Operations - -A view instance may be: -* [Created](Creating.md) -* [Retrieved](Getting.md) -* [Modified](Modifying.md) -* [Deleted](Dropping.md) diff --git a/Documentation/Books/HTTP/assets/swagger_serverapi_overview.png b/Documentation/Books/HTTP/assets/swagger_serverapi_overview.png deleted file mode 100644 index 2994a3026a63..000000000000 Binary files a/Documentation/Books/HTTP/assets/swagger_serverapi_overview.png and /dev/null differ diff --git a/Documentation/Books/HTTP/book.json b/Documentation/Books/HTTP/book.json deleted file mode 100644 index 2dd57f3519a1..000000000000 --- a/Documentation/Books/HTTP/book.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "gitbook": "^3.2.2", - "title": "ArangoDB VERSION_NUMBER HTTP API Documentation", - "version": "VERSION_NUMBER", - "author": "ArangoDB GmbH", - "description": "Official HTTP API manual for ArangoDB - the native multi-model NoSQL database", - "language": "en", - "plugins": [ - "-search", - "-lunr", - "-sharing", - "toggle-chapters", - "addcssjs", - "anchorjs", - "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git", - "ga", - "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git", - "edit-link", - "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git", - "localized-footer" - ], - "pdf": { - "fontSize": 12, - "toc": true, - "margin": { - "right": 60, - "left": 60, - "top": 35, - "bottom": 35 - } - }, - "styles": { - "website": "styles/website.css" - }, - "pluginsConfig": { - "addcssjs": { - "js": ["styles/header.js", "styles/hs.js"], - "css": ["styles/header.css"] - }, - "sitemap-general": { - "prefix": "https://docs.arangodb.com/devel/HTTP/", - "changefreq": "@GCHANGE_FREQ@", - "priority": @GPRIORITY@ - }, - "ga": { - "token": "UA-81053435-2" - }, - "edit-link": { - "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/HTTP", - "label": "Edit Page" - }, - "localized-footer": { - "filename": "FOOTER.html" - } - } -} diff --git a/Documentation/Books/HTTP/styles/header.css b/Documentation/Books/HTTP/styles/header.css deleted file mode 100644 index 4ec87c77b0e5..000000000000 --- a/Documentation/Books/HTTP/styles/header.css +++ /dev/null @@ -1,305 +0,0 @@ -/* Design fix because of the header */ -@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700); - -body { - overflow: hidden; - font-family: Roboto, Helvetica, sans-serif; - background: #444444; -} - -.book .book-header h1 a, .book .book-header h1 a:hover { - display: none; -} - -/* GOOGLE START */ - -.google-search #gsc-iw-id1{ - border: none !important; -} - -.google-search .gsst_b { - position: relative; - top: 10px; - left: -25px; - width: 1px; -} - -.gsst_a .gscb_a { - color: #c01a07 !important; -} - -.google-search input { - background-color: #fff !important; - font-family: Roboto, Helvetica, sans-serif; - font-size: 10pt !important; - padding-left: 5px !important; - float: right; - position: relative; - top: 8px; - width: 100% !important; - height: 30px !important; -} - -.google-search input:active { -} - -.google-search { - margin-right: 10px; - margin-left: 10px !important; - float: right !important; -} - -.google-search td, -.google-search table, -.google-search tr, -.google-search th { 
- background-color: #444444 !important; -} - -.google-search .gsc-input-box, -.google-search .gsc-input-box input { - border-radius: 3px !important; - width: 200px; -} - -.gsc-branding-text, -.gsc-branding-img, -.gsc-user-defined-text { - display: none !important; -} - -.google-search .gsc-input-box input { - font-size: 16px !important; -} - -.google-search .gsc-search-button { - display: none !important; -} - -.google-search .gsc-control-cse { - padding: 10px !important; -} - -.google-search > div { - float: left !important; - width: 200px !important; -} - -/* GOOGLE END */ - -.book-summary, -.book-body { - margin-top: 48px; -} - -.arangodb-logo, .arangodb-logo-small { - display: inline; - float: left; - padding-top: 12px; - margin-left: 10px; -} - -.arangodb-logo img { - height: 23px; -} - -.arangodb-logo-small { - display: none; -} - -.arangodb-version-switcher { - width: 65px; - height: 44px; - margin-left: 16px; - float: left; - display: inline; - font-weight: bold; - color: #fff; - background-color: inherit; - border: 0; -} - -.arangodb-version-switcher option { - background-color: white; - color: black; -} - - -.arangodb-header { - position: fixed; - width: 100%; - height: 48px; - z-index: 1; -} - -.arangodb-header .socialIcons-googlegroups a img { - position: relative; - height: 14px; - top: 3px; -} - -.arangodb-navmenu { - display: block; - float: right; - margin: 0; - padding: 0; -} - -.arangodb-navmenu li { - display: block; - float: left; -} - -.arangodb-navmenu li a { - display: block; - float: left; - padding: 0 10px; - line-height: 48px; - font-size: 16px; - font-weight: 400; - color: #fff; - text-decoration: none; - font-family: Roboto, Helvetica, sans-serif; -} - -.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover { - background-color: #88A049 !important; -} - -.downloadIcon { - margin-right: 10px; -} - -/** simple responsive updates **/ - -@media screen and (max-width: 1000px) { - .arangodb-navmenu li a { - padding: 0 6px; - } - - .arangodb-logo { - margin-left: 10px; - } - - .google-search { - margin-right: 5px !important; - } - - .downloadIcon { - margin-right: 0; - } - - .socialIcons { - display: none !important; - } -} - - -@media screen and (max-width: 800px) { - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 130px !important; - } - - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-logo { - display: none; - } - - .arangodb-logo-small { - display: inline; - margin-left: 10px; - } - - .arangodb-logo-small img { - height: 20px; - } - - .arangodb-version-switcher { - margin: 0; - } - -} - -@media screen and (max-width: 600px) { - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-version-switcher, - .downloadIcon { - display: none !important; - } - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 24px !important; - } - - .google-search .gsc-input-box input[style] { - background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important; - } - - .google-search .gsc-input-box input:focus { - width: 200px !important; - position: relative; - left: -176px; - background-position: -9999px -9999px !important; - } - -} - -@media screen and (max-width: 400px) { - .arangodb-navmenu li a { - font-size: 13px; - padding: 0 5px; - } - .google-search { - display: none; - } -} - -/*Hubspot Cookie notice */ - -body div#hs-eu-cookie-confirmation { - bottom: 0; - top: 
auto; - position: fixed; - text-align: center !important; -} - -body div#hs-eu-cookie-confirmation.can-use-gradients { - background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75)); -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner { - display: inline-block; - padding: 15px 18px 0; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area { - float: left; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button { - background-color: #577138 !important; - border: none !important; - text-shadow: none !important; - box-shadow: none; - padding: 5px 15px !important; - margin-left: 10px; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p { - float: left; - color: #000 !important; - text-shadow: none; -} diff --git a/Documentation/Books/HTTP/styles/header.js b/Documentation/Books/HTTP/styles/header.js deleted file mode 100644 index 9c90741c07d7..000000000000 --- a/Documentation/Books/HTTP/styles/header.js +++ /dev/null @@ -1,160 +0,0 @@ -// Try to set the version number early, jQuery not available yet -var searcheable_versions = [@BROWSEABLE_VERSIONS@]; -var cx = '@GSEARCH_ID@'; -document.addEventListener("DOMContentLoaded", function(event) { - if (!gitbook.state.root) return; - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = document.getElementsByClassName("arangodb-version-switcher")[0]; - if (bookVersion) { - switcher.value = bookVersion[1]; - } else { - switcher.style.display = "none"; - } -}); - -window.onload = function(){ -window.localStorage.removeItem(":keyword"); - -$(document).ready(function() { - -function appendHeader() { - var VERSION_SELECTOR = "" - var i = 0; - var prefix; - for (i = 0; i < searcheable_versions.length; i++ ) { - if (searcheable_versions[i] === 'devel') { - prefix = ''; - } else { - prefix = 'v'; - } - VERSION_SELECTOR += '\n'; - } - - var div = document.createElement('div'); - div.innerHTML = '
\n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n'; - - $('.book').before(div.innerHTML); - - }; - - - function rerenderNavbar() { - $('.arangodb-header').remove(); - appendHeader(); - }; - - //render header - rerenderNavbar(); - function addGoogleSrc() { - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//cse.google.com/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - }; - addGoogleSrc(); - - $(".arangodb-navmenu a[data-book]").on("click", function(e) { - e.preventDefault(); - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - urlSplit.pop(); // e.g. "Manual" - window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html"; - }); - - // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*) - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = $(".arangodb-version-switcher"); - if (bookVersion) { - switcher.val(bookVersion[1]); - } else { - switcher.hide(); - } - - $(".arangodb-version-switcher").on("change", function(e) { - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - var currentBook = urlSplit.pop(); // e.g. "Manual" - urlSplit.pop() // e.g. "3.0" - if (e.target.value == "2.8") { - var legacyMap = { - "Manual": "", - "AQL": "/Aql", - "HTTP": "/HttpApi", - "Cookbook": "/Cookbook" - }; - currentBook = legacyMap[currentBook]; - } else { - currentBook = "/" + currentBook; - } - window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html"; - }); - -}); - -}; diff --git a/Documentation/Books/HTTP/styles/hs.js b/Documentation/Books/HTTP/styles/hs.js deleted file mode 100644 index 9a8ae18a61d2..000000000000 --- a/Documentation/Books/HTTP/styles/hs.js +++ /dev/null @@ -1,33 +0,0 @@ -// HubSpot Script Loader. Please do not block this resource. 
See more: http://hubs.ly/H0702_H0 - -(function (id, src, attrs) { - if (document.getElementById(id)) { - try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); } - finally { return; } - } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } } - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"}); - -(function (id, src) { - if (document.getElementById(id)) { return; } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js'); - -window.setTimeout(function () { - $('body').on('click', 'a', function () { - var _hsq = window._hsq = window._hsq || []; - _hsq.push(['setPath', window.location.pathname]); - _hsq.push(['trackPageView']); - }); -}, 1000); diff --git a/Documentation/Books/HTTP/styles/website.css b/Documentation/Books/HTTP/styles/website.css deleted file mode 100644 index 0bbc2f1eff37..000000000000 --- a/Documentation/Books/HTTP/styles/website.css +++ /dev/null @@ -1,84 +0,0 @@ -.markdown-section small { - font-size: 80%; -} -.markdown-section sub, .markdown-section sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -.markdown-section sup { - top: -.5em; -} -.markdown-section sub { - bottom: -.25em; -} - -div.example_show_button { - border: medium solid lightgray; - text-align: center; - position: relative; - top: -10px; - display: flex; - justify-content: center; -} - -.book .book-body .navigation.navigation-next { - right: 10px !important; -} - -.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover { - color: #fff !important; - background: #80A54D !important; - text-decoration: none; -} - -.book .book-body .page-wrapper .page-inner section.normal .deprecated{ - background-color: rgba(240,240,0,0.4); -} - -.book .book-body section > ul li:last-child { - margin-bottom: 0.85em; -} - -.book .book-body .alert p:last-child { - margin-bottom: 0; -} - -.columns-3 { - -webkit-column-count: 3; - -moz-column-count: 3; - -ms-column-count: 3; - -o-column-count: 3; - column-count: 3; - columns: 3; -} - -.localized-footer { - opacity: 0.5; -} - -.example-container { - position: relative; -} - -.example-container a.anchorjs-link { - position: absolute; - top: 10px; - right: 10px; - font: 1em/1 anchorjs-icons; -} - -.gsib_a { -padding: 0px !important; -} - -.gsc-control-cse { -border: 0px !important; -background-color: transparent !important; -} - - -.gsc-input { -margin: 0px !important; -} diff --git a/Documentation/Books/Manual/.gitkeep b/Documentation/Books/Manual/.gitkeep new file mode 100644 index 000000000000..936ca3adc4e3 --- /dev/null +++ b/Documentation/Books/Manual/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the directory is kept. + +Some of the old documentation building scripts are still +used by the new system which copy files into this folder. 
\ No newline at end of file diff --git a/Documentation/Books/Manual/Administration/ActiveFailover/README.md b/Documentation/Books/Manual/Administration/ActiveFailover/README.md deleted file mode 100644 index abbef3d4c123..000000000000 --- a/Documentation/Books/Manual/Administration/ActiveFailover/README.md +++ /dev/null @@ -1,89 +0,0 @@ -Active Failover Administration -============================== - -This _Section_ includes information related to the administration of an _Active Failover_ -setup. - -For a general introduction to the ArangoDB _Active Failover_ setup, please refer -to the _Active Failover_ [chapter](../../Architecture/DeploymentModes/ActiveFailover/README.md). - -Introduction ------------- - -The _Active Failover_ setup requires almost no manual administration. - -You may still need to replace, upgrade or remove individual nodes -in an _Active Failover_ setup. - - -Determining the current _Leader_ --------------------------------- - -It is possible to determine the _leader_ by asking any of the involved single-server -instances. Just send a request to the `/_api/cluster/endpoints` REST API. - -```bash -curl http://server.domain.org:8530/_api/cluster/endpoints -{ - "error": false, - "code": 200, - "endpoints": [ - { - "endpoint": "tcp://[::1]:8530" - }, - { - "endpoint": "tcp://[::1]:8531" - } - ] -} -``` - -This API will return you all available endpoints, the first endpoint is defined to -be the current _Leader_. This endpoint is always available and will not be blocked -with a `HTTP/1.1 503 Service Unavailable` response on a _Follower_ - -Reading from Follower ---------------------- - -Followers in the active-failover setup are in a read-only mode. It is possible to read from these -followers by adding a `X-Arango-Allow-Dirty-Read: true` header on each request. Responses will then automatically -contain the `X-Arango-Potential-Dirty-Read` header so that clients can reject accidental dirty reads. - -Depending on the driver support for your specific programming language, you should be able to enable this option. - -Upgrading / Replacing / Removing a _Leader_ -------------------------------------------- - -A _Leader_ is the active server which can receive all read and write operations -in an _Active-Failover_ setup. - -Upgrading or removing a _Leader_ can be a little tricky, because as soon as you -stop the leader's process you will trigger a failover situation. This can be intended -here, but you will probably want to halt all writes to the _leader_ for a certain -amount of time to allow the _follower_ to catch up on all operations. - -After you have ensured that the _follower_ is sufficiently caught up, you can -stop the _leader_ process via the shutdown API or by sending a `SIGTERM` signal -to the process (i.e. `kill `). This will trigger an orderly shutdown, -and should trigger an immediate switch to the _follower_. If your client drivers -are configured correctly, you should notice almost no interruption in your -applications. - -Once you upgraded the local server via the `--database.auto-upgrade` option, -you can add it again to the _Active Failover_ setup. The server will resync automatically -with the new _Leader_ and become a _Follower_. - -Upgrading / Replacing / Removing a _Follower_ ---------------------------------------------- - -A _Follower_ is the passive server which tries to mirror all the data stored in -the _Leader_. - -To upgrade a _follower_ you only need to stop the process and start it -with `--database.auto-upgrade`. 
The server process will automatically resync -with the master after a restart. - -The clean way of removing a _Follower_ is to first start a replacement _Follower_ -(otherwise you will lose resiliency). To start a _Follower_ please have a look -into our [deployment guide](../../Deployment/ActiveFailover/README.md). -After you have your replacement ready you can just kill the process and remove it. diff --git a/Documentation/Books/Manual/Administration/Cluster/README.md b/Documentation/Books/Manual/Administration/Cluster/README.md deleted file mode 100644 index a0294aab37f7..000000000000 --- a/Documentation/Books/Manual/Administration/Cluster/README.md +++ /dev/null @@ -1,206 +0,0 @@ -Cluster Administration -====================== - -This _Section_ includes information related to the administration of an ArangoDB Cluster. - -For a general introduction to the ArangoDB Cluster, please refer to the -[Cluster](../../Architecture/DeploymentModes/Cluster/README.md) chapter. - -There is also a detailed -[Cluster Administration Course](https://www.arangodb.com/arangodb-cluster-course/) -for download. - -Please check the following talks as well: - -| # | Date | Title | Who | Link | -|---|-----------------|-----------------------------------------------------------------------------|-----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| 1 | 10th April 2018 | Fundamentals and Best Practices of ArangoDB Cluster Administration | Kaveh Vahedipour, ArangoDB Cluster Team | [Online Meetup Page](https://www.meetup.com/online-ArangoDB-meetup/events/248996022/) & [Video](https://www.youtube.com/watch?v=RQ33fkgUg64) | -| 2 | 29th May 2018 | Fundamentals and Best Practices of ArangoDB Cluster Administration: Part II | Kaveh Vahedipour, ArangoDB Cluster Team | [Online Meetup Page](https://www.meetup.com/online-ArangoDB-meetup/events/250869684/) & [Video](https://www.youtube.com/watch?v=jj7YpTaL3pI) | - - -Enabling synchronous replication --------------------------------- - -For an introduction about _Synchronous Replication_ in Cluster, please refer -to the [_Cluster Architecture_](../../Architecture/DeploymentModes/Cluster/Architecture.md#synchronous-replication) section. - -Synchronous replication can be enabled per _collection_. When creating a -_collection_ you may specify the number of _replicas_ using the -*replicationFactor* parameter. The default value is set to `1` which -effectively *disables* synchronous replication among _DBServers_. - -Whenever you specify a _replicationFactor_ greater than 1 when creating a -collection, synchronous replication will be activated for this collection. -The Cluster will determine suitable _leaders_ and _followers_ for every -requested _shard_ (_numberOfShards_) within the Cluster. - -Example: - -``` -127.0.0.1:8530@_system> db._create("test", {"replicationFactor": 3}) -``` - -In the above case, any write operation will require 3 replicas to -report success from now on. - -Preparing growth ----------------- - -You may create a _collection_ with higher _replication factor_ than -available _DBServers_. When additional _DBServers_ become available -the _shards_ are automatically replicated to the newly available _DBServers_. 
- -To create a _collection_ with higher _replication factor_ than -available _DBServers_ please set the option _enforceReplicationFactor_ to _false_, -when creating the collection from _ArangoShell_ (the option is not available -from the web interface), e.g.: - -``` -db._create("test", { replicationFactor: 4 }, { enforceReplicationFactor: false }); -``` - -The default value for _enforceReplicationFactor_ is true. - -**Note:** multiple _replicas_ of the same _shard_ can never coexist on the same -_DBServer_ instance. - -Sharding --------- - -For an introduction about _Sharding_ in Cluster, please refer to the -[_Cluster Architecture_](../../Architecture/DeploymentModes/Cluster/Architecture.md#sharding) section. - -Number of _shards_ can be configured at _collection_ creation time, e.g. the UI, -or the _ArangoDB Shell_: - -``` -127.0.0.1:8529@_system> db._create("sharded_collection", {"numberOfShards": 4}); -``` - -To configure a custom _hashing_ for another attribute (default is __key_): - -``` -127.0.0.1:8529@_system> db._create("sharded_collection", {"numberOfShards": 4, "shardKeys": ["country"]}); -``` - -The example above, where 'country' has been used as _shardKeys_ can be useful -to keep data of every country in one shard, which would result in better -performance for queries working on a per country base. - -It is also possible to specify multiple `shardKeys`. - -Note however that if you change the shard keys from their default `["_key"]`, then finding -a document in the collection by its primary key involves a request to -every single shard. Furthermore, in this case one can no longer prescribe -the primary key value of a new document but must use the automatically -generated one. This latter restriction comes from the fact that ensuring -uniqueness of the primary key would be very inefficient if the user -could specify the primary key. - -On which DBServer in a Cluster a particular _shard_ is kept is undefined. -There is no option to configure an affinity based on certain _shard_ keys. - -Unique indexes (hash, skiplist, persistent) on sharded collections are -only allowed if the fields used to determine the shard key are also -included in the list of attribute paths for the index: - -| shardKeys | indexKeys | | -|----------:|----------:|------------:| -| a | a | allowed | -| a | b | not allowed | -| a | a, b | allowed | -| a, b | a | not allowed | -| a, b | b | not allowed | -| a, b | a, b | allowed | -| a, b | a, b, c | allowed | -| a, b, c | a, b | not allowed | -| a, b, c | a, b, c | allowed | - -Sharding strategy ------------------ - -strategy to use for the collection. Since ArangoDB 3.4 there are -different sharding strategies to select from when creating a new -collection. The selected *shardingStrategy* value will remain -fixed for the collection and cannot be changed afterwards. This is -important to make the collection keep its sharding settings and -always find documents already distributed to shards using the same -initial sharding algorithm. 
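Since the strategy is fixed at creation time, it has to be passed together with the other collection properties. The following sketch (not from the original manual) shows one possible request; the collection name is a placeholder and `"hash"` is one of the accepted values listed right below.

```bash
# Sketch: pin the sharding strategy explicitly when creating a collection
# (hypothetical collection name; "hash" is the default for new collections since 3.4).
curl -X POST http://localhost:8529/_db/_system/_api/collection \
  --data '{
    "name": "orders",
    "numberOfShards": 4,
    "shardingStrategy": "hash"
  }'
```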
- -The available sharding strategies are: -- `community-compat`: default sharding used by ArangoDB - Community Edition before version 3.4 -- `enterprise-compat`: default sharding used by ArangoDB - Enterprise Edition before version 3.4 -- `enterprise-smart-edge-compat`: default sharding used by smart edge - collections in ArangoDB Enterprise Edition before version 3.4 -- `hash`: default sharding used for new collections starting from version 3.4 - (excluding smart edge collections) -- `enterprise-hash-smart-edge`: default sharding used for new - smart edge collections starting from version 3.4 - -If no sharding strategy is specified, the default will be `hash` for -all collections, and `enterprise-hash-smart-edge` for all smart edge -collections (requires the *Enterprise Edition* of ArangoDB). -Manually overriding the sharding strategy does not yet provide a -benefit, but it may later in case other sharding strategies are added. - - -Moving/Rebalancing _shards_ ---------------------------- - -A _shard_ can be moved from a _DBServer_ to another, and the entire shard distribution -can be rebalanced using the correponding buttons in the web [UI](../../Programs/WebInterface/Cluster.md). - -Replacing/Removing a _Coordinator_ ----------------------------------- - -_Coordinators_ are effectively stateless and can be replaced, added and -removed without more consideration than meeting the necessities of the -particular installation. - -To take out a _Coordinator_ stop the -_Coordinator_'s instance by issueing `kill -SIGTERM `. - -Ca. 15 seconds later the cluster UI on any other _Coordinator_ will mark -the _Coordinator_ in question as failed. Almost simultaneously, a trash bin -icon will appear to the right of the name of the _Coordinator_. Clicking -that icon will remove the _Coordinator_ from the coordinator registry. - -Any new _Coordinator_ instance that is informed of where to find any/all -agent/s, `--cluster.agency-endpoint` `` will be -integrated as a new _Coordinator_ into the cluster. You may also just -restart the _Coordinator_ as before and it will reintegrate itself into -the cluster. - -Replacing/Removing a _DBServer_ -------------------------------- - -_DBServers_ are where the data of an ArangoDB cluster is stored. They -do not publish a web UI and are not meant to be accessed by any other -entity than _Coordinators_ to perform client requests or other _DBServers_ -to uphold replication and resilience. - -The clean way of removing a _DBServer_ is to first relieve it of all -its responsibilities for shards. This applies to _followers_ as well as -_leaders_ of shards. The requirement for this operation is that no -collection in any of the databases has a `relicationFactor` greater or -equal to the current number of _DBServers_ minus one. For the purpose of -cleaning out `DBServer004` for example would work as follows, when -issued to any _Coordinator_ of the cluster: - -`curl /_admin/cluster/cleanOutServer -d '{"server":"DBServer004"}'` - -After the _DBServer_ has been cleaned out, you will find a trash bin -icon to the right of the name of the _DBServer_ on any _Coordinators_' -UI. Clicking on it will remove the _DBServer_ in question from the -cluster. - -Firing up any _DBServer_ from a clean data directory by specifying the -any of all agency endpoints will integrate the new _DBServer_ into the -cluster. - -To distribute shards onto the new _DBServer_ either click on the -`Distribute Shards` button at the bottom of the `Shards` page in every -database. 
- diff --git a/Documentation/Books/Manual/Administration/Configuration/README.md b/Documentation/Books/Manual/Administration/Configuration/README.md deleted file mode 100644 index 7e43018ff6c4..000000000000 --- a/Documentation/Books/Manual/Administration/Configuration/README.md +++ /dev/null @@ -1,286 +0,0 @@ -# Configuration - -The [programs and tools](../../Programs/README.md) shipped in an -ArangoDB package can be configured with various _startup options_. - -- Startup options you specify on a command line are referred to as - [command line options](#command-line-options): - - `arangosh --server.database myDB` - -- The same options can also be set via - [configuration files](#configuration-file-format), - using a slightly different syntax: - - `server.database = myDB` - -- There are also _flags_ which are for command line usage only, - such as `‑‑help` and `‑‑version`. They don't take any value - in contrast to options. - -Find the available options and flags in the _Options_ sub-chapters of the -respective [Programs & Tools](../../Programs/README.md) sub-chapter, like the -[ArangoDB Server Options](../../Programs/Arangod/Options.md). - -The [ArangoDB Starter](../../Programs/Starter/README.md) works differently -to the other programs and tools. It uses `setup.json` files for its own -[configuration](../../Programs/Starter/Architecture.md#starter-data-directory) -and has a fluent command line interface to execute certain actions. -If you deploy ArangoDB with the Starter, then custom `arangod.conf` files -are generated by this tool and are used instead of the default configuration. - -## Command line options - -Command line options can be supplied in the style `‑‑option value` with two -dashes (also known as hyphen minus), the name of the option, a space as -separator and the value. You may also use an equals sign `=` as separator -like `‑‑option=value`. - -The value can be surrounded with double quote marks `"` like -`‑‑option="value"`. This is mandatory if the value contains spaces, -but it is optional otherwise. - -Some binaries accept one unnamed argument, which means you can take a -shortcut and leave out the `‑‑option` part and supply the value directly. -It does not matter if you supply it as first or last argument, or between -any of the named arguments. For _arangod_ it is the `‑‑database.directory` -option. The following commands are identical: - -``` -arangod my_data_dir -arangod "my_data_dir" -arangod --database.directory my_data_dir -arangod --database.directory=my_data_dir -arangod --database.directory "my_data_dir" -arangod --database.directory="my_data_dir" -``` - -Many options belong to a section as in `‑‑section.param`, e.g. -`‑‑server.database`, but there can also be options without any section. -These options are referred to as _global options_. - -To list available options, you can run a binary with the `‑‑help` flag: - -``` -arangosh --help -``` - -To list the options of a certain section only, use `‑‑help‑{section}` -like `‑‑help‑server`. To list all options including hidden ones use -`‑‑help‑.`. - -## Configuration file format - -`.conf` files for ArangoDB binaries are in a simple key-value pair format. 
-Each option is specified on a separate line in the form: - -```conf -key = value -``` - -It may look like this: - -```conf -server.endpoint = tcp://127.0.0.1:8529 -server.authentication = true -``` - -Alternatively, a header section can be specified and options pertaining to -that section can be specified in a shorter form: - -```conf -[server] -endpoint = tcp://127.0.0.1:8529 -authentication = true -``` - -So you see, a command line option `‑‑section.param value` can be easily -translated to an option in a configuration file: - -```js -[section] -param = value -``` - -{% hint 'tip' %} -Whitespace around `=` is ignored in configuration files. -This includes whitespace around equality signs in the parameter value: - -```conf -log.level = startup = trace -``` - -It is the same as without whitespace: - -```conf -log.level=startup=trace -``` -{% endhint %} - -Comments can be placed in the configuration file by placing one or more -hash symbols `#` at the beginning of a line. - -Only command line options with a value should be set within the configuration -file. Command line options which act as flags should only be entered on the -command line when starting the server. - -## Using Configuration Files - -For each binary (except `arangodb`, which is the _Starter_) there is a -corresponding `.conf` file that an ArangoDB package ships with. -`arangosh.conf` contains the default ArangoShell configuration for instance. -The configuration files can be adjusted or new ones be created. - -To load a particular configuration file, there is a `‑‑configuration` option -available to let you specify a path to a `.conf` file. If you want to -completely ignore a configuration file (likely the default one) without -necessarily deleting the file, then add the command line option - -``` --c none -``` - -or - -``` ---configuration none -``` - -The value *none* is case-insensitive. - - - -## Environment variables as parameters - -If you want to use an environment variable in a value of a startup option, -write the name of the variable wrapped in at signs `@`. It acts as a -placeholder. It can be combined with fixed strings for instance. - -Command line example: - -``` -arangod --temp.path @TEMP@/arango_tmp -``` - -In a configuration file: - -``` -[temp] -path = @TEMP@/arango_tmp -``` - -On a Windows system, above setting would typically make the ArangoDB Server -create its folder for temporary files in `%USERPROFILE%\AppData\Local\Temp`, -i.e. `C:\Users\xxx\AppData\Local\Temp\arango_tmp`. - - - -## Options with multiple values - -Certain startup options accept multiple values. In case of parameters being -_vectors_ you can specify one or more times the option with varying values. -Whether this is the case can be seen by looking at the **Type** column of a -tool's option table (e.g. [ArangoDB Server Options](../../Programs/Arangod/Options.md)) -or the type information provided on a command line in the `--help` output of -an ArangoDB binary: - -``` ---log.level the global or topic-specific log level -``` - -Vectors can be identified by the three dots `...` at the end of the data type -information (in angled brackets). For `log.level` you can set one or more -strings for different log levels for example. Simply repeat the option to -do so. On a command line: - -``` -arangod --log.level warning --log.level queries=trace --log.level startup=info -``` - -This sets a global log level of `warning` and two topic-specific levels -(`trace` for queries and `info` for startup). 
The same in a configuration file: - -```conf -[log] -level = warning -level = queries=trace -level = startup=info -``` - -## Configuration precedence - -There are built-in defaults, with which all configuration variables are first -initialized. They can be overridden by configuration files and command line -options (in this order). Only a fraction of all available options are set in -the configuration files that ArangoDB ships with. Many options will therefore -fall back to the built-in defaults unless they are overridden by the user. - -It is common to use modified configuration files together with startup -options on a command line to override specific settings. Command line options -take precedence over values set in a configuration file. - -If the same option is set multiple times, but only supports a single value, -then the last occurrence of the option will become the final value. -For example, if you edit `arangosh.conf` to set: - -``` -server.database = myDB1 -server.database = myDB2 -``` - -… and start ArangoShell like: - -``` -arangosh --server.database myDB3 --server.database myDB4 -``` - -… then the database it will connect to is `myDB4`, because this startup option -takes a single value only (i.e. it is not a vector), the built-in default -is `_system` but the configuration file overrules the setting. It gets set to -`myDB1` temporarily before it is replaced by `myDB2`, which in turn gets -overridden by the command line options twice, first to `myDB3` and then the -final value `myDB4`. - -## Change configuration at runtime - -In general, supplied startup options can not be changed nor can configuration -files be reloaded once an executable is started, other than by restarting the -executable with different options. However, some of the startup options -define default values which can be overridden on a per-query basis for -instance, or adjusted at runtime via an API call. Examples: - -- [Query cache configuration](../../../AQL/ExecutionAndPerformance/QueryCache.html#global-configuration) - via JavaScript API -- [Change WAL settings](../../../HTTP/MiscellaneousFunctions/index.html#configures-the-write-ahead-log) - via an HTTP API request - -## Fetch Current Configuration Options - -To list the configuration options of a running `arangod` instance, you can -connect with an [ArangoShell](../../Programs/Arangosh/README.md) and invoke a -[Transaction](../../Transactions/README.md) by calling `db._executeTransaction()` -and providing a JavaScript function to retrieve the server options: - - @startDocuBlockInline listCurrentConfigOpts - @EXAMPLE_ARANGOSH_OUTPUT{listCurrentConfigOpts} - db._executeTransaction({ collections: {}, action: function() {return require("internal").options(); } }) - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock listCurrentConfigOpts diff --git a/Documentation/Books/Manual/Administration/DC2DC/README.md b/Documentation/Books/Manual/Administration/DC2DC/README.md deleted file mode 100644 index 66cba252740f..000000000000 --- a/Documentation/Books/Manual/Administration/DC2DC/README.md +++ /dev/null @@ -1,150 +0,0 @@ - -# Datacenter to datacenter replication administration - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -This Section includes information related to the administration of the _datacenter -to datacenter replication_. 
- -For a general introduction to the _datacenter to datacenter replication_, please -refer to the [Datacenter to datacenter replication](../../Architecture/DeploymentModes/DC2DC/README.md) -chapter. - -## Starting synchronization - -Once all components of the _ArangoSync_ solution have been deployed and are -running properly, _ArangoSync_ will not automatically replicate database structure -and content. For that, it is is needed to configure synchronization. - -To configure synchronization, you need the following: - -- The endpoint of the sync master in the target datacenter. -- The endpoint of the sync master in the source datacenter. -- A certificate (in keyfile format) used for client authentication of the sync master - (with the sync master in the source datacenter). -- A CA certificate (public key only) for verifying the integrity of the sync masters. -- A username+password pair (or client certificate) for authenticating the configure - require with the sync master (in the target datacenter) - -With that information, run: - -```bash -arangosync configure sync \ - --master.endpoint= \ - --master.keyfile= \ - --source.endpoint= \ - --source.cacert= \ - --auth.user= \ - --auth.password= -``` - -The command will finish quickly. Afterwards it will take some time until -the clusters in both datacenters are in sync. - -## Inspect status - -Use the following command to inspect the status of the synchronization of a datacenter: - -```bash -arangosync get status \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - -v -``` - -Note: Invoking this command on the target datacenter will return different results from -invoking it on the source datacenter. You need insight in both results to get a "complete picture". - -Where the `get status` command gives insight in the status of synchronization, there -are more detailed commands to give insight in tasks & registered workers. - -Use the following command to get a list of all synchronization tasks in a datacenter: - -```bash -arangosync get tasks \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - -v -``` - -Use the following command to get a list of all masters in a datacenter and know which master is the current leader: - -```bash -arangosync get masters \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - -v -``` - -Use the following command to get a list of all workers in a datacenter: - -```bash -arangosync get workers \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - -v -``` - -## Stopping synchronization - -If you no longer want to synchronize data from a source to a target datacenter -you must stop it. To do so, run the following command: - -```bash -arangosync stop sync \ - --master.endpoint= \ - --auth.user= \ - --auth.password= -``` - -The command will first ensure that all shards in the receiving cluster are -completely in-sync with the shards in the sending cluster. -In order to achieve that, the sending cluster will be switched to read/only mode. -After the synchronization has stopped, the sending cluster will be switched -back to read/write mode. - -The command will then wait until synchronization has completely stopped before returning. -If the synchronization is not completely stopped within a reasonable period (2 minutes by default) -the command will fail. - -If you do not want to wait for all shards in the receiving cluster to be -completely in-sync with the shards in the sending cluster, add an `--ensure-in-sync=false` -argument to the `stop sync` command. 
- -If the source datacenter is no longer available it is not possible to stop synchronization in -a graceful manner. If that happens abort the synchronization with the following command: - -```bash -arangosync abort sync \ - --master.endpoint= \ - --auth.user= \ - --auth.password= -``` - -If the source datacenter recovers after an `abort sync` has been executed, it is -needed to "cleanup" ArangoSync in the source datacenter. -To do so, execute the following command: - -```bash -arangosync abort outgoing sync \ - --master.endpoint= \ - --auth.user= \ - --auth.password= -``` - -## Reversing synchronization direction - -If you want to reverse the direction of synchronization (e.g. after a failure -in datacenter A and you switched to the datacenter B for fallback), you -must first stop (or abort) the original synchronization. - -Once that is finished (and cleanup has been applied in case of abort), -you must now configure the synchronization again, but with swapped -source & target settings. diff --git a/Documentation/Books/Manual/Administration/Engine/SwitchEngine.md b/Documentation/Books/Manual/Administration/Engine/SwitchEngine.md deleted file mode 100644 index 1d341fcef538..000000000000 --- a/Documentation/Books/Manual/Administration/Engine/SwitchEngine.md +++ /dev/null @@ -1,24 +0,0 @@ -Switching the storage engine ----------------------------- - -In order to use a different storage engine with an existing data directory, -it is required to first create a logical backup of the data using the -tool [_arangodump_](../../Programs/Arangodump/README.md). - -After that, the _arangod_ server process should be restarted with the desired storage -engine selected (this can be done by setting the option *--server.storage-engine*, -or by editing the configuartion file of the server) and using a **non-existing data directory**. -If you have deployed using the [_Starter_](../../Programs/Starter/README.md), -instead of _arangod_ you will need to run _arangodb_, and pass to it the option -*--server.storage-engine* and the option *--starter.data-dir* to set a new -data directory. - -When the server is up and running with the desired storage engine, the data -can be re-imported using the tool -[_arangorestore_](../../Programs/Arangorestore/README.md). - -{% hint 'tip' %} -For a list of available storage engines, and more information on their -differences, please refer to the [Storage Engines](../../Architecture/StorageEngines.md) -page under the [Architecture](../../Architecture/README.md) chapter. -{% endhint %} diff --git a/Documentation/Books/Manual/Administration/ImportExport.md b/Documentation/Books/Manual/Administration/ImportExport.md deleted file mode 100644 index 3aff87ce7c17..000000000000 --- a/Documentation/Books/Manual/Administration/ImportExport.md +++ /dev/null @@ -1,16 +0,0 @@ -Import and Export -================= - -Imports and exports can be done with the tools -[_arangoimport_](../Programs/Arangoimport/README.md) and -[_arangoexport_](../Programs/Arangoexport/README.md). 
- - - - - - - - - - diff --git a/Documentation/Books/Manual/Administration/ManagingUsers/InArangosh.md b/Documentation/Books/Manual/Administration/ManagingUsers/InArangosh.md deleted file mode 100644 index af4dfb75ebfb..000000000000 --- a/Documentation/Books/Manual/Administration/ManagingUsers/InArangosh.md +++ /dev/null @@ -1,292 +0,0 @@ -Managing Users in the ArangoDB Shell -==================================== - -Please note, that for backward compatibility the server access levels -follow from the database access level on the database *_system*. - -Also note that the server and database access levels are represented as - -* `rw`: for *Administrate* -* `ro`: for *Access* -* `none`: for *No access* - -This is again for backward compatibility. - -**Example** - -Fire up *arangosh* and require the users module. Use it to create a new user: - -``` -arangosh> const users = require('@arangodb/users'); -arangosh> users.save('JohnSmith', 'mypassword'); -``` - -Creates a user called *JohnSmith*. This user will have no access at all. - -``` -arangosh> users.grantDatabase('JohnSmith', 'testdb', 'rw'); -``` - -This grants the user *Administrate* access to the database -*testdb*. `revokeDatabase` will revoke this access level setting. - -**Note**: Be aware that from 3.2 onwards the `grantDatabase` will not -automatically grant users the access level to write or read collections in a -database. If you grant access to a database `testdb` you will -additionally need to explicitly grant access levels to individual -collections via `grantCollection`. - -The upgrade procedure from 3.1 to 3.2 sets the wildcard database access -level for all users to *Administrate* and sets the wildcard collection -access level for all user/database pairs to *Read/Write*. - -``` -arangosh> users.grantCollection('JohnSmith', 'testdb', 'testcoll', 'rw'); -``` - -Save ----- - -`users.save(user, passwd, active, extra)` - -This will create a new ArangoDB user. The user name must be specified in *user* -and must not be empty. - -The password must be given as a string, too, but can be left empty if -required. If you pass the special value *ARANGODB_DEFAULT_ROOT_PASSWORD*, the -password will be set the value stored in the environment variable -`ARANGODB_DEFAULT_ROOT_PASSWORD`. This can be used to pass an instance -variable into ArangoDB. For example, the instance identifier from Amazon. - -If the *active* attribute is not specified, it defaults to *true*. The *extra* -attribute can be used to save custom data with the user. - -This method will fail if either the user name or the passwords are not -specified or given in a wrong format, or there already exists a user with the -specified name. - -**Note**: The user will not have permission to access any database. You need -to grant the access rights for one or more databases using -[grantDatabase](#grant-database). - -*Examples* - - @startDocuBlockInline USER_02_saveUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_02_saveUser} - require('@arangodb/users').save('my-user', 'my-secret-password'); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_02_saveUser - -Grant Database --------------- - -`users.grantDatabase(user, database, type)` - -This grants *type* ('rw', 'ro' or 'none') access to the *database* for -the *user*. If *database* is `"*"`, this sets the wildcard database access -level for the user *user*. - -The server access level follows from the access level for the database -`_system`. 
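To make the interplay between wildcard and specific grants concrete, here is a minimal arangosh sketch. The user and database names are purely illustrative, and the user is assumed to exist already (see `users.save()` above):

```js
const users = require('@arangodb/users');

// Wildcard: grants *Access* (read-only) to every database without an explicit grant.
users.grantDatabase('ReportingUser', '*', 'ro');

// An explicit grant for one database overrides the wildcard for that database.
users.grantDatabase('ReportingUser', 'reports', 'rw');

// Because the server access level follows from the `_system` database,
// this also makes the user a server administrator.
users.grantDatabase('ReportingUser', '_system', 'rw');
```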
- -Revoke Database ---------------- - -`users.revokeDatabase(user, database)` - -This clears the access level setting to the *database* for the *user* and -the wildcard database access setting for this user kicks in. In case no wildcard -access was defined the default is *No Access*. This will also -clear the access levels for all the collections in this database. - -Grant Collection ----------------- - -`users.grantCollection(user, database, collection, type)` - -This grants *type* ('rw', 'ro' or 'none') access level to the *collection* -in *database* for the *user*. If *collection* is `"*"` this sets the -wildcard collection access level for the user *user* in database -*database*. - -Revoke Collection ------------------ - -`users.revokeCollection(user, database)` - -This clears the access level setting to the collection *collection* for the -user *user*. The system will either fallback to the wildcard collection access -level or default to *No Access* - -Replace -------- - -`users.replace(user, passwd, active, extra)` - -This will look up an existing ArangoDB user and replace its user data. - -The username must be specified in *user*, and a user with the specified name -must already exist in the database. - -The password must be given as a string, too, but can be left empty if required. - -If the *active* attribute is not specified, it defaults to *true*. The -*extra* attribute can be used to save custom data with the user. - -This method will fail if either the user name or the passwords are not specified -or given in a wrong format, or if the specified user cannot be found in the -database. - -**Note**: this function will not work from within the web interface - -*Examples* - - @startDocuBlockInline USER_03_replaceUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_03_replaceUser} - require("@arangodb/users").replace("my-user", "my-changed-password"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_03_replaceUser - -Update ------- - -`users.update(user, passwd, active, extra)` - -This will update an existing ArangoDB user with a new password and other data. - -The user name must be specified in *user* and the user must already exist in -the database. - -The password must be given as a string, too, but can be left empty if required. - -If the *active* attribute is not specified, the current value saved for the -user will not be changed. The same is true for the *extra* attribute. - -This method will fail if either the user name or the passwords are not specified -or given in a wrong format, or if the specified user cannot be found in the -database. - -*Examples* - - @startDocuBlockInline USER_04_updateUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_04_updateUser} - require("@arangodb/users").update("my-user", "my-secret-password"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_04_updateUser - -isValid -------- - -`users.isValid(user, password)` - -Checks whether the given combination of user name and password is valid. The -function will return a boolean value if the combination of user name and password -is valid. - -Each call to this function is penalized by the server sleeping a random -amount of time. - -*Examples* - - @startDocuBlockInline USER_05_isValidUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_05_isValidUser} - require("@arangodb/users").isValid("my-user", "my-secret-password"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_05_isValidUser - -Remove ------- - -`users.remove(user)` - -Removes an existing ArangoDB user from the database. 
- -The user name must be specified in *User* and the specified user must exist in -the database. - -This method will fail if the user cannot be found in the database. - -*Examples* - - @startDocuBlockInline USER_07_removeUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_07_removeUser} - require("@arangodb/users").remove("my-user"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_07_removeUser - -Document --------- - -`users.document(user)` - -Fetches an existing ArangoDB user from the database. - -The user name must be specified in *user*. - -This method will fail if the user cannot be found in the database. - -*Examples* - - @startDocuBlockInline USER_04_documentUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_04_documentUser} - require("@arangodb/users").document("my-user"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_04_documentUser - -All ---- - -`users.all()` - -Fetches all existing ArangoDB users from the database. - -*Examples* - - @startDocuBlockInline USER_06_AllUsers - @EXAMPLE_ARANGOSH_OUTPUT{USER_06_AllUsers} - require("@arangodb/users").all(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_06_AllUsers - -Reload ------- - -`users.reload()` - -Reloads the user authentication data on the server - -All user authentication data is loaded by the server once on startup only and is -cached after that. When users get added or deleted, a cache flush is done -automatically, and this can be performed by a call to this method. - -*Examples* - - @startDocuBlockInline USER_03_reloadUser - @EXAMPLE_ARANGOSH_OUTPUT{USER_03_reloadUser} - require("@arangodb/users").reload(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_03_reloadUser - - -Permission --------- - -`users.permission(user, database[, collection])` - -Fetches the access level to the database or a collection. - -The user and database name must be specified, optionally you can specify -the collection name. - -This method will fail if the user cannot be found in the database. - -*Examples* - - @startDocuBlockInline USER_05_permission - @EXAMPLE_ARANGOSH_OUTPUT{USER_05_permission} - ~ require("@arangodb/users").grantDatabase("my-user", "testdb"); - require("@arangodb/users").permission("my-user", "testdb"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock USER_05_permission - - diff --git a/Documentation/Books/Manual/Administration/ManagingUsers/README.md b/Documentation/Books/Manual/Administration/ManagingUsers/README.md deleted file mode 100644 index cc29b618423b..000000000000 --- a/Documentation/Books/Manual/Administration/ManagingUsers/README.md +++ /dev/null @@ -1,310 +0,0 @@ -Managing Users -============== - -The user management in ArangoDB 3 is similar to the ones found in MySQL, -PostgreSQL, or other database systems. - -User management is possible in the [web interface](../../Programs/WebInterface/Users.md) -and in [arangosh](InArangosh.md) while logged on to the *\_system* database. - -Note that usernames *must* not start with `:role:`. - -Actions and Access Levels -------------------------- - -An ArangoDB server contains a list of users. It also defines various -access levels that can be assigned to a user (for details, see below) -and that are needed to perform certain actions. These actions can be grouped -into three categories: - -- server actions -- database actions -- collection actions - -The **server actions** are - -- **create user**: allows to create a new user. - -- **update user**: allows to change the access levels and details of an existing -user. - -- **drop user**: allows to delete an existing user. 
- -- **create database**: allows to create a new database. - -- **drop database**: allows to delete an existing database. - -- **shutdown server**: remove server from cluster and shutdown - -The **database actions** are tied to a given database, and access -levels must be set -for each database individually. For a given database the actions are - -- **create collection**: allows to create a new collection in the given database. - -- **update collection**: allows to update properties of an existing collection. - -- **drop collection**: allows to delete an existing collection. - -- **create index**: allows to create an index for an existing collection in the -given database. - -- **drop index**: allows to delete an index of an existing collection in the given -database. - -The **collection actions** are tied to a given collection of a given -database, and access levels must be set for each collection individually. -For a given collection the actions are - -- **read document**: read a document of the given collection. - -- **create document**: creates a new document in the given collection. - -- **modify document**: modifies an existing document of the given collection, -this can be an update or replace operation. - -- **drop document**: deletes an existing document of the given collection. - -- **truncate collection**: deletes all documents of a given collection. - -To perform actions on the server level the user needs at least the following -access levels. The access levels are *Administrate* and -*No access*: - -| server action | server level | -|---------------------------|--------------| -| create a database | Administrate | -| drop a database | Administrate | -| create a user | Administrate | -| update a user | Administrate | -| update user access level | Administrate | -| drop a user | Administrate | -| shutdown server | Administrate | - -To perform actions in a specific database (like creating or dropping collections), -a user needs at least the following access level. -The possible access levels for databases are *Administrate*, *Access* and *No access*. -The access levels for collections are *Read/Write*, *Read Only* and *No Access*. - -| database action | database level | collection level | -|------------------------------|----------------|------------------| -| create collection | Administrate | Read/Write | -| list collections | Access | Read Only | -| rename collection | Administrate | Read/Write | -| modify collection properties | Administrate | Read/Write | -| read properties | Access | Read Only | -| drop collection | Administrate | Read/Write | -| create an index | Administrate | Read/Write | -| drop an index | Administrate | Read/Write | -| see index definition | Access | Read Only | - -Note that the access level *Access* for a database is always required to perform -any action on a collection in that database. - -For collections a user needs the following access -levels to the given database and the given collection. The access levels for -the database are *Administrate*, *Access* and *No access*. The access levels -for the collection are *Read/Write*, *Read Only* and *No Access*. 
- -| action | collection level | database level | -|-----------------------|-------------------------|------------------------| -| read a document | Read/Write or Read Only | Administrate or Access | -| create a document | Read/Write | Administrate or Access | -| modify a document | Read/Write | Administrate or Access | -| drop a document | Read/Write | Administrate or Access | -| truncate a collection | Read/Write | Administrate or Access | - - -*Example* - -For example, given - -- a database *example* -- a collection *data* in the database *example* -- a user *JohnSmith* - -If the user *JohnSmith* is assigned the access level *Access* for the database -*example* and the level *Read/Write* for the collection *data*, then the user -is allowed to read, create, modify or delete documents in the collection -*data*. But the user is, for example, not allowed to create indexes for the -collection *data* nor create new collections in the database *example*. - -Granting Access Levels ----------------------- - -Access levels can be managed via the [web interface](../../Programs/WebInterface/Users.md) -or in [arangosh](InArangosh.md). - -In order to grant an access level to a user, you can assign one of -three access levels for each database and one of three levels for each -collection in a database. The server access level for the user follows -from the database access level in the `_system` database, it is -*Administrate* if and only if the database access level is -*Administrate*. Note that this means that database access level -*Access* does not grant a user server access level *Administrate*. - -### Initial Access Levels - -When a user creates a database the access level of the user for that database is set to *Administrate*. The same is true for creating a collection, in this case the user get *Read/Write* access to the collection. - -### Wildcard Database Access Level - -With the above definition, one must define the database access level for -all database/user pairs in the server, which would be very tedious. In -order to simplify this process, it is possible to define, for a user, -a wildcard database access level. This wildcard is used if the database -access level is *not* explicitly defined for a certain database. Each new created user has an initial database wildcard of *No Access*. - -Changing the wildcard database access level for a user will change the -access level for all databases that have no explicitly defined -access level. Note that this includes databases which will be created -in the future and for which no explicit access levels are set for that -user! - -If you delete the wildcard, the default access level is defined as *No Access*. - -The `root` user has an initial database wildcard of *Administrate*. - -*Example* - -Assume user *JohnSmith* has the following database access levels: - -| | access level | -|------------------|--------------| -| database `*` | Access | -| database `shop1` | Administrate | -| database `shop2` | No Access | - -This will give the user *JohnSmith* the following database level access: - -- database `shop1`: *Administrate* -- database `shop2`: *No Access* -- database `something`: *Access* - -If the wildcard `*` is changed from *Access* to *No Access* then the -permissions will change as follows: - -- database `shop1`: *Administrate* -- database `shop2`: *No Access* -- database `something`: *No Access* - -### Wildcard Collection Access Level - -For each user and database there is a wildcard collection access level. 
-This level is used for all collection pairs without an explicitly
-defined collection access level. Note that this includes collections
-which will be created in the future and for which no explicit access
-levels are set for that user! Each newly created user has an initial
-collection wildcard of *No Access*.
-
-If you delete the wildcard, the system defaults to *No Access*.
-
-The `root` user has an initial collection wildcard of *Read/Write* in every database.
-
-When a database is created via [db._createDatabase(name, options, users)](../../DataModeling/Databases/WorkingWith.md#create-database)
-with a list of users, the access level of these users for the new database will be set to
-*Administrate* and their wildcard for all collections within this database will be set to *Read/Write*.
-
-*Example*
-
-Assume user *JohnSmith* has the following database access levels:
-
-|              | access level |
-|--------------|--------------|
-| database `*` | Access       |
-
-and collection access levels:
-
-|                                          | access level |
-|------------------------------------------|--------------|
-| database `*`, collection `*`             | Read/Write   |
-| database `shop1`, collection `products`  | Read-Only    |
-| database `shop1`, collection `*`         | No Access    |
-| database `shop2`, collection `*`         | Read-Only    |
-
-Then the user *JohnSmith* will get the following collection access levels:
-
-- database `shop1`, collection `products`: *Read-Only*
-- database `shop1`, collection `customers`: *No Access*
-- database `shop2`, collection `reviews`: *Read-Only*
-- database `something`, collection `else`: *Read/Write*
-
-Explanation:
-
-Database `shop1`, collection `products` directly matches a defined
-access level. This level is defined as *Read-Only*.
-
-Database `shop1`, collection `customers` does not match a defined access
-level. However, database `shop1` matches and the wildcard in this
-database for the collection level is *No Access*.
-
-Database `shop2`, collection `reviews` does not match a defined access
-level. However, database `shop2` matches and the wildcard in this
-database for the collection level is *Read-Only*.
-
-Database `something`, collection `else` does not match a defined access
-level. There is no wildcard for the database `something` either, so the
-global wildcard `*`/`*` is selected. The level is *Read/Write*.
-
-### Permission Resolution
-
-The access levels for databases and collections are resolved in the following way:
-
-For a database "*foo*":
-1. Check if there is a specific database grant for *foo*; if yes, use the granted access level
-2. Otherwise, choose the higher access level of:
-   * A wildcard database grant (for example `grantDatabase('user', '*', 'rw')`)
-   * A database grant on the `_system` database
-
-For a collection named "*bar*":
-1. Check if there is a specific collection grant for *bar*; if yes, use the granted access level
-2. Otherwise, choose the higher access level of:
-   * Any wildcard collection grant in the same database, or on `*`/`*` (in this example `grantCollection('user', 'foo', '*', 'rw')`)
-   * The access level for the current database (in this example `grantDatabase('user', 'foo', 'rw')`)
-   * The access level for the `_system` database
-
-An exception to this are system collections: for them, only the access level for the database is used.
-
-### System Collections
-
-The access level for system collections cannot be changed. They follow
-different rules than user-defined collections and may change without further
-notice.
Currently the system collections follow these rules: - -| collection | access level | -|--------------------------|--------------| -| `_users` (in _system) | No Access | -| `_queues` | Read-Only | -| `_frontend` | Read/Write | -| `*` | *same as db* | - -All other system collections have access level *Read/Write* if the -user has *Administrate* access to the database. They have access level -*Read/Only* if the user has *Access* to the database. - -To modify these system collections you should always use the -specialized APIs provided by ArangoDB. For example -no user has access to the *\_users* collection in the *\_system* -database. All changes to the access levels must be done using the -*@arangodb/users* module, the `/_users/` API or the web interface. - - -### LDAP Users - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -ArangoDB supports LDAP as an external authentication system. For detailed -information please have look into the -[LDAP configuration guide](../../Programs/Arangod/Ldap.md). - -There are a few differences to *normal* ArangoDB users: -- ArangoDB does not "*know*" LDAP users before they first authenticate, calls to various API's using endpoints in `_api/users/*` will **fail** until the user first logs-in -- Access levels of each user are periodically updated, this will happen by default every *5 minutes* -- It is not possible to change permissions on LDAP users directly, only on **roles** -- LDAP users cannot store configuration data per user (affects for example custom settings in the graph viewer) - -To grant access for an LDAP user you will need to create *roles* within the ArangoDB server. A role -is just a user with the __":role:"__ prefix in its name. Role users cannot login as database users, the ":role:" prefix ensures this. -Your LDAP users will need to have at least one role, once the user logs in he will be automatically granted the union of -all access rights of all his roles. Note that a lower right grant in one role will be overwritten by a higher access grant in a different role. - diff --git a/Documentation/Books/Manual/Administration/MasterSlave/DatabaseSetup.md b/Documentation/Books/Manual/Administration/MasterSlave/DatabaseSetup.md deleted file mode 100644 index 8baf01a1c12c..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/DatabaseSetup.md +++ /dev/null @@ -1,245 +0,0 @@ -Per-Database Setup -================== - -This page describes the master/slave replication process based on a specific database within an ArangoDB instance. -That means that only the specified database will be replicated. - -Setting up a working master-slave replication requires two ArangoDB instances: -- **master**: this is the instance where all data-modification operations should be directed to -- **slave**: this is the instance that replicates the data from the master. We will start a _replication applier_ - on it, and it will fetch data from the - master database's _write-ahead log_ and apply its operations locally - -For the following example setup, we will use the instance *tcp://master.domain.org:8529* as the -_master_, and the instance *tcp://slave.domain.org:8530* as a _slave_. - -The goal is to have all data from the database *_system* on _master_ *tcp://master.domain.org:8529* -be replicated to the database *_system* on the _slave_ *tcp://slave.domain.org:8530*. 
- -On the _master_, nothing special needs to be done, as all write operations will automatically be -logged in the master's _write-ahead log_ (WAL). - -All-in-one setup ----------------- - -To make the replication copy the initial data from the **master** to the **slave** and start the -continuous replication on the **slave**, there is an all-in-one command: - -```js -require("@arangodb/replication").setupReplication(configuration); -``` - -The following example demonstrates how to use the command for setting up replication -for the *_system* database. Note that it should be run on the **slave** and not the -**master**: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").setupReplication({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false, - includeSystem: false, - incremental: true, - autoResync: true -}); -``` - -The command will return when the initial synchronization is finished and the continuous replication -has been started, or in case the initial synchronization has failed. - -If the initial synchronization is successful, the command will store the given configuration on -the slave. It also configures the continuous replication to start automatically if the slave is -restarted, i.e. *autoStart* is set to *true*. - -If the command is run while the slave's replication applier is already running, it will first -stop the running applier, drop its configuration and do a resynchronization of data with the -**master**. It will then use the provided configration, overwriting any previously existing replication -configuration on the **slave**. - - -Initial synchronization ------------------------ - -The initial synchronization and continuous replication applier can also be started separately. -To start replication on the **slave**, make sure there currently is no replication applier running. - -The following commands stop a running applier in the slave's *_system* database: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").applier.stop(); -``` - -The *stop* operation will terminate any replication activity in the _system database on the slave. - - -After that, the initial synchronization can be run. It will copy the collections from the **master** -to the **slave**, overwriting existing data. To run the initial synchronization, execute the following -commands on the **slave**: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").sync({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false -}); -``` - -Username and password only need to be specified when the **master** requires authentication. -To check what the synchronization is currently doing, supply set the *verbose* option to *true*. -If set, the synchronization will create log messages with the current synchronization status. - -**Warning**: The *sync* command will replace data in the **slave** database with data from the -**master** database! Only execute these commands if you have verified you are on the correct server, -in the correct database! - -The sync operation will return an attribute named *lastLogTick* which we'll need to note. The -last log tick will be used as the starting point for subsequent replication activity. Let's -assume we got the following last log tick: - -```js -{ - "lastLogTick" : "40694126", - ... 
-} -``` - -Initial synchronization from the ArangoShell --------------------------------------------- - -The initial synchronization via the *sync* command may take a long time to complete. The shell -will block until the slave has completed the initial synchronization or until an error occurs. -By default, the *sync* command in the ArangoShell will poll the slave for a status update every -10 seconds. - -Optionally the *sync* command can be made non-blocking by setting its *async* option to true. -In this case, the *sync command* will return instantly with an id string, and the initial -synchronization will run detached on the master. To fetch the current status of the *sync* -progress from the ArangoShell, the *getSyncResult* function can be used as follows: - -```js -db._useDatabase("_system"); -var replication = require("@arangodb/replication"); - -/* run command in async mode */ -var id = replication.sync({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - async: true -}); - -/* now query the status of our operation */ -print(replication.getSyncResult(id)); -``` - -*getSyncResult* will return *false* as long as the synchronization is not complete, and return the -synchronization result otherwise. - - -Continuous synchronization --------------------------- - -When the initial synchronization is finished, the continuous replication applier can be started using -the last log tick provided by the *sync* command. Before starting it, there is at least one -configuration option to consider: replication on the **slave** will be running until the -**slave** gets shut down. When the slave server gets restarted, replication will be turned off again. -To change this, we first need to configure the slave's replication applier and set its -*autoStart* attribute. - -Here's the command to configure the replication applier with several options, including the -*autoStart* attribute: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").applier.properties({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - autoStart: true, - autoResync: true, - autoResyncRetries: 2, - adaptivePolling: true, - includeSystem: false, - requireFromPresent: false, - idleMinWaitTime: 0.5, - idleMaxWaitTime: 1.5, - verbose: false -}); -``` - -An important consideration for replication is whether data from system collections (such as -*_graphs* or *_users*) should be applied. The *includeSystem* option controls that. If set to -*true*, changes in system collections will be replicated. Otherwise, they will not be replicated. -It is often not necessary to replicate data from system collections, especially because it may -lead to confusion on the slave because the slave needs to have its own system collections in -order to start and keep operational. - -{% hint 'warning' %} -There is a separate option *includeFoxxQueues* for controlling whether Foxx queue jobs from the system -collections `_jobs` and `_queues` collections should be replicated. Documents from these collections -are not replicated by default in order to avoid execution of Foxx queue jobs on the slave. -{% endhint %} - -The *requireFromPresent* attribute controls whether the applier will start synchronizing in case -it detects that the master cannot provide data for the initial tick value provided by the slave. -This may be the case if the master does not have a big enough backlog of historic WAL logfiles, -and when the replication is re-started after a longer pause. 
When *requireFromPresent* is set to -*true*, then the replication applier will check at start whether the start tick from which it starts -or resumes replication is still present on the master. If not, then there would be data loss. If -*requireFromPresent* is *true*, the replication applier will abort with an appropriate error message. -If set to *false*, then the replication applier will still start, and ignore the data loss. - -The *autoResync* option can be used in conjunction with the *requireFromPresent* option as follows: -when both *requireFromPresent* and *autoResync* are set to *true* and the master cannot provide the -log data the slave had requested, the replication applier will stop as usual. But due to the fact -that *autoResync* is set to true, the slave will automatically trigger a full resync of all data with -the master. After that, the replication applier will go into continuous replication mode again. -Additionally, setting *autoResync* to *true* will trigger a full re-synchronization of data when -the continuous replication is started and detects that there is no start tick value. - -Note that automatic re-synchronization (*autoResync* option set to *true*) may transfer a lot of -data from the master to the slave and can therefore be expensive. Still it's turned on here so -there's less need for manual intervention. - -The *autoResyncRetries* option can be used to control the number of resynchronization retries that -will be performed in a row when automatic resynchronization is enabled and kicks in. Setting this to -*0* will effectively disable *autoResync*. Setting it to some other value will limit the number of retries -that are performed. This helps preventing endless retries in case resynchronizations always fail. - -Now it's time to start the replication applier on the slave using the last log tick we got -before: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").applier.start("40694126"); -``` - -This will replicate all operations happening in the master's system database and apply them -on the slave, too. - -After that, you should be able to monitor the state and progress of the replication -applier by executing the *state* command on the slave server: - -```js - db._useDatabase("_system"); - require("@arangodb/replication").applier.state(); -``` - -Please note that stopping the replication applier on the slave using the *stop* command -should be avoided. The reason is that currently ongoing transactions (that have partly been -replicated to the slave) will be need to be restarted after a restart of the replication -applier. Stopping and restarting the replication applier on the slave should thus only be -performed if there is certainty that the master is currently fully idle and all transactions -have been replicated fully. - -Note that while a slave has only partly executed a transaction from the master, it might keep -a write lock on the collections involved in the transaction. - -You may also want to check the master and slave states via the HTTP APIs -(see [HTTP Interface for Replication](../../../HTTP/Replications/index.html)). 
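As a rough sketch of what such monitoring could look like from arangosh on the slave, the following reads a few commonly used fields from the applier state. The exact field names can differ between ArangoDB versions, so treat them as illustrative and compare with the full output of `applier.state()` on your installation:

```js
db._useDatabase("_system");
var applier = require("@arangodb/replication").applier;

var s = applier.state();
// Fields typically of interest when monitoring a slave:
print("applier running:    " + s.state.running);
print("last applied tick:  " + s.state.lastAppliedContinuousTick);
print("last error:         " + JSON.stringify(s.state.lastError));
```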
- diff --git a/Documentation/Books/Manual/Administration/MasterSlave/README.md b/Documentation/Books/Manual/Administration/MasterSlave/README.md deleted file mode 100644 index 80bed4620adb..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Master/Slave Administration -=========================== - -This _Chapter_ includes information related to the administration of a Master/Slave -environment. - -For a general introduction to the ArangoDB Master/Slave environment, please refer -to the Master/Slave [chapter](../../Architecture/DeploymentModes/MasterSlave/README.md). diff --git a/Documentation/Books/Manual/Administration/MasterSlave/ReplicationApplier.md b/Documentation/Books/Manual/Administration/MasterSlave/ReplicationApplier.md deleted file mode 100644 index 904eec6c73b3..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/ReplicationApplier.md +++ /dev/null @@ -1,302 +0,0 @@ -_Replication applier_ -===================== - -Replication configuration -------------------------- - -The replication is turned off by default. In order to create a master-slave setup, -the so-called _replication applier_ needs to be enabled on the _slave_ databases. - -Replication is configured on a per-database level or (starting from v3.3.0) at server level. - -The _replication applier_ on the _slave_ can be used to perform a one-time synchronization -with the _master_ (and then stop), or to perform an ongoing replication of changes. To -resume replication on _slave_ restart, the *autoStart* attribute of the replication -applier must be set to *true*. - -_setupReplication_ Command --------------------------- - -To copy the initial data from the _master_ to the _slave_ and start the -continuous replication, there is an all-in-one command *setupReplication*. - -From _Arangosh_: - -```js -require("@arangodb/replication").setupReplication(configuration); -``` - -The following example demonstrates how to use the command for setting up replication -for the *_system* database. Note that it should be run on the _slave_ and not the _master_: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").setupReplication({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false, - includeSystem: false, - incremental: true, - autoResync: true -}); -``` - -The command will return when the initial synchronization is finished and the continuous replication -is started, or in case the initial synchronization has failed. - -If the initial synchronization is successful, the command will store the given configuration on -the _slave_. It also configures the continuous replication to start automatically if the slave is -restarted, i.e. *autoStart* is set to *true*. - -If the command is run while the slave's replication applier is already running, it will first -stop the running applier, drop its configuration and do a resynchronization of data with the -_master_. It will then use the provided configuration, overwriting any previously existing replication -configuration on the slave. - -### Starting and Stopping the _replication applier_ - -To manually start and stop the _replication applier_ in the current database, the *start* and *stop* commands -can be used like this: - -```js -require("@arangodb/replication").applier.start(); -require("@arangodb/replication").applier.stop(); -``` - -**Note**: Starting a _replication applier_ without setting up an initial configuration will -fail. 
The replication applier will look for its configuration in a file named -*REPLICATION-APPLIER-CONFIG* in the current database's directory. If the file is not present, -ArangoDB will use some default configuration, but it cannot guess the endpoint (the address -of the master database) the applier should connect to. Thus starting the applier without -configuration will fail. - -Note that at the first time you start the applier, you should pass the value returned in the -*lastLogTick* attribute of the initial sync operation. - -**Note**: Starting a database's replication applier via the *start* command will not necessarily -start the applier on the next and following ArangoDB server restarts. Additionally, stopping a -database's replication applier manually will not necessarily prevent the applier from being -started again on the next server start. All of this is configurable separately (hang on reading). - -**Note**: when stopping and restarting the replication applier of database, it will resume where -it last stopped. This is sensible because replication log events should be applied incrementally. -If the replication applier of a database has never been started before, it needs some *tick* value -from the master's log from which to start fetching events. - -There is one caveat to consider when stopping a replication on the slave: if there are still -ongoing replicated transactions that are neither committed or aborted, stopping the replication -applier will cause these operations to be lost for the slave. If these transactions commit on the -master later and the replication is resumed, the slave will not be able to commit these transactions, -too. Thus stopping the replication applier on the slave manually should only be done if there -is certainty that there are no ongoing transactions on the master. - - -### _Replication applier_ Configuration - -To configure the _replication applier_ of a specific database, use the *properties* command. Using -it without any arguments will return the applier's current configuration: - -```js -require("@arangodb/replication").applier.properties(); -``` - -The result might look like this: - -```js -{ - "requestTimeout" : 600, - "connectTimeout" : 10, - "ignoreErrors" : 0, - "maxConnectRetries" : 10, - "chunkSize" : 0, - "autoStart" : false, - "adaptivePolling" : true, - "includeSystem" : true, - "requireFromPresent" : false, - "autoResync" : false, - "autoResyncRetries" : 2, - "verbose" : false -} -``` - -**Note**: There is no *endpoint* attribute configured yet. The *endpoint* attribute is required -for the replication applier to be startable. You may also want to configure a username and password -for the connection via the *username* and *password* attributes. - -```js -require("@arangodb/replication").applier.properties({ - endpoint: "tcp://master.domain.org:8529", - username: "root", - password: "secret", - verbose: false -}); -``` - -This will re-configure the replication applier for the current database. The configuration will be -used from the next start of the replication applier. The replication applier cannot be re-configured -while it is running. It must be stopped first to be re-configured. - -To make the replication applier of the current database start automatically when the ArangoDB server -starts, use the *autoStart* attribute. - -Setting the *adaptivePolling* attribute to *true* will make the replication applier poll the -master database for changes with a variable frequency. 
The replication applier will then lower the -frequency when the master is idle, and increase it when the master can provide new events). -Otherwise the replication applier will poll the master database for changes with a constant frequency. - -The *idleMinWaitTime* attribute controls the minimum wait time (in seconds) that the replication applier -will intentionally idle before fetching more log data from the master in case the master has already -sent all its log data. This wait time can be used to control the frequency with which the replication -applier sends HTTP log fetch requests to the master in case there is no write activity on the master. - -The *idleMaxWaitTime* attribute controls the maximum wait time (in seconds) that the replication -applier will intentionally idle before fetching more log data from the master in case the master has -already sent all its log data and there have been previous log fetch attempts that resulted in no more -log data. This wait time can be used to control the maximum frequency with which the replication -applier sends HTTP log fetch requests to the master in case there is no write activity on the master -for longer periods. Note that this configuration value will only be used if the option *adaptivePolling* -is set to *true*. - -To set a timeout for connection and following request attempts, use the *connectTimeout* and -*requestTimeout* values. The *maxConnectRetries* attribute configures after how many failed -connection attempts in a row the replication applier will give up and turn itself off. -You may want to set this to a high value so that temporary network outages do not lead to the -replication applier stopping itself. -The *connectRetryWaitTime* attribute configures how long the replication applier will wait -before retrying the connection to the master in case of connection problems. - -The *chunkSize* attribute can be used to control the approximate maximum size of a master's -response (in bytes). Setting it to a low value may make the master respond faster (less data is -assembled before the master sends the response), but may require more request-response roundtrips. -Set it to *0* to use ArangoDB's built-in default value. - -The *includeSystem* attribute controls whether changes to system collections (such as *_graphs* or -*_users*) should be applied. If set to *true*, changes in these collections will be replicated, -otherwise, they will not be replicated. It is often not necessary to replicate data from system -collections, especially because it may lead to confusion on the slave because the slave needs to -have its own system collections in order to start and keep operational. - -{% hint 'warning' %} -There is a separate option *includeFoxxQueues* for controlling whether Foxx queue jobs from the system -collections `_jobs` and `_queues` collections should be replicated. Documents from these collections -are not replicated by default in order to avoid execution of Foxx queue jobs on the slave. -{% endhint %} - -The *requireFromPresent* attribute controls whether the applier will start synchronizing in case -it detects that the master cannot provide data for the initial tick value provided by the slave. -This may be the case if the master does not have a big enough backlog of historic WAL logfiles, -and when the replication is re-started after a longer pause. 
When *requireFromPresent* is set to -*true*, then the replication applier will check at start whether the start tick from which it starts -or resumes replication is still present on the master. If not, then there would be data loss. If -*requireFromPresent* is *true*, the replication applier will abort with an appropriate error message. -If set to *false*, then the replication applier will still start, and ignore the data loss. - -The *autoResync* option can be used in conjunction with the *requireFromPresent* option as follows: -when both *requireFromPresent* and *autoResync* are set to *true* and the master cannot provide the -log data the slave requests, the replication applier will stop as usual. But due to the fact -that *autoResync* is set to true, the slave will automatically trigger a full resync of all data with -the master. After that, the replication applier will go into continuous replication mode again. -Additionally, setting *autoResync* to *true* will trigger a full re-synchronization of data when -the continuous replication is started and detects that there is no start tick value. - -Automatic re-synchronization may transfer a lot of data from the master to the slave and can be -expensive. It is therefore turned off by default. When turned off, the slave will never perform an -automatic re-synchronization with the master. - -The *autoResyncRetries* option can be used to control the number of resynchronization retries that -will be performed in a row when automatic resynchronization is enabled and kicks in. Setting this to -*0* will effectively disable *autoResync*. Setting it to some other value will limit the number of retries -that are performed. This helps preventing endless retries in case resynchronizations always fail. - -The *verbose* attribute controls the verbosity of the replication logger. Setting it to `true` will -make the replication applier write a line to the log for every operation it performs. This should -only be used for diagnosing replication problems. - -The following example will set most of the discussed properties for the current database's applier: - -```js -require("@arangodb/replication").applier.properties({ - endpoint: "tcp://master.domain.org:8529", - username: "root", - password: "secret", - adaptivePolling: true, - connectTimeout: 15, - maxConnectRetries: 100, - chunkSize: 262144, - autoStart: true, - includeSystem: true, - autoResync: true, - autoResyncRetries: 2, -}); -``` - -After the applier is now fully configured, it could theoretically be started. However, we -may first need an initial synchronization of all collections and their data from the master before -we start the replication applier. - -The only safe method for doing a full synchronization (or re-synchronization) is thus to - -* stop the replication applier on the slave (if currently running) -* perform an initial full sync with the master database -* note the master database's *lastLogTick* value and -* start the continuous replication applier on the slave using this tick value. - -The initial synchronization for the current database is executed with the *sync* command: - -```js -require("@arangodb/replication").sync({ - endpoint: "tcp://master.domain.org:8529", - username: "root", - password: "secret", - includeSystem: true -}); -``` - -The *includeSystem* option controls whether data from system collections (such as *_graphs* and -*_users*) shall be synchronized. 
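Before looking at the remaining *sync* options, here is how the safe full-synchronization procedure outlined above might look end to end. This is only a sketch: it assumes the applier properties (endpoint, credentials, and so on) have already been configured as shown earlier, and it omits error handling:

```js
// Run on the slave, in the database that should be replicated.
db._useDatabase("_system");
var replication = require("@arangodb/replication");

replication.applier.stop();                      // 1. make sure no applier is running

var result = replication.sync({                  // 2. full copy of the data from the master
  endpoint: "tcp://master.domain.org:8529",
  username: "root",
  password: "secret",
  includeSystem: false
});

replication.applier.start(result.lastLogTick);   // 3. resume continuous replication from the
                                                 //    tick value returned by sync
```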
- -The initial synchronization can optionally be configured to include or exclude specific -collections using the *restrictType* and *restrictCollection* parameters. - -The following command only synchronizes collection *foo* and *bar*: - -```js -require("@arangodb/replication").sync({ - endpoint: "tcp://master.domain.org:8529", - username: "root", - password: "secret", - restrictType: "include", - restrictCollections: [ "foo", "bar" ] -}); -``` - -Using a *restrictType* of *exclude*, all collections but the specified will be synchronized. - -**Warning**: *sync* will do a full synchronization of the collections in the current database with -collections present in the master database. -Any local instances of the collections and all their data are removed! Only execute this -command if you are sure you want to remove the local data! - -As *sync* does a full synchronization, it might take a while to execute. -When *sync* completes successfully, it returns an array of collections it has synchronized in its -*collections* attribute. It will also return the master database's last log tick value -at the time the *sync* was started on the master. The tick value is contained in the *lastLogTick* -attribute of the *sync* command: - -```js -{ - "lastLogTick" : "231848833079705", - "collections" : [ ... ] -} -``` -Now you can start the continuous synchronization for the current database on the slave -with the command - -```js -require("@arangodb/replication").applier.start("231848833079705"); -``` - -**Note**: The tick values should be treated as strings. Using numeric data types for tick -values is unsafe because they might exceed the 32 bit value and the IEEE754 double accuracy -ranges. diff --git a/Documentation/Books/Manual/Administration/MasterSlave/ServerLevelSetup.md b/Documentation/Books/Manual/Administration/MasterSlave/ServerLevelSetup.md deleted file mode 100644 index 69f208c88010..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/ServerLevelSetup.md +++ /dev/null @@ -1,244 +0,0 @@ -Server-level Setup -================== - -This page describes the replication process based on a complete ArangoDB instance. That means that -all included databases will be replicated. - -**Note:** Server-level Setup is available only from version 3.3.0. - -Setting up a working master-slave replication requires two ArangoDB instances: -- **master**: this is the instance where all data-modification operations should be directed to -- **slave**: this is the instance that replicates the data from the master. We will start a _replication applier_ - on it, and it will fetch data from the - master _write-ahead log_ and apply its operations locally - -For the following example setup, we will use the instance *tcp://master.domain.org:8529* as the -_master_, and the instance *tcp://slave.domain.org:8530* as a _slave_. - -The goal is to have all data of all databases on _master_ *tcp://master.domain.org:8529* -be replicated to the _slave_ instance *tcp://slave.domain.org:8530*. - -On the **master**, nothing special needs to be done, as all write operations will automatically be -logged in the master's _write-ahead log_ (WAL). 
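It can nevertheless be useful to write a small, easily recognizable piece of test data on the master, so that you can later verify on the slave that replication is actually flowing. A minimal sketch; the collection name and document content are purely illustrative:

```js
// Run on the master. With server-level replication, any database will do.
db._useDatabase("_system");
var col = db._collection("replicationSmokeTest") || db._create("replicationSmokeTest");
col.insert({ checkedAt: new Date().toISOString() });
```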
- -All-in-one setup ---------------- - -To make the replication copy the initial data from the **master** to the **slave** and start the -continuous replication on the **slave**, there is an all-in-one command: - -```js -require("@arangodb/replication").setupReplicationGlobal(configuration); -``` - -The following example demonstrates how to use the command for setting up replication -for the complete ArangoDB instance. Note that it should be run on the **slave** and not the -**master**: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").setupReplicationGlobal({ - endpoint: "tcp://127.0.0.1:8529", - username: "root", - password: "", - autoStart: true -}); -``` - -The command will return when the initial synchronization is finished and the continuous replication -has been started, or in case the initial synchronization has failed. - -If the initial synchronization is successful, the command will store the given configuration on -the slave. It also configures the continuous replication to start automatically if the slave is -restarted, i.e. *autoStart* is set to *true*. - -If the command is run while the slave's replication applier is already running, it will first -stop the running applier, drop its configuration and do a resynchronization of data with the -**master**. It will then use the provided configuration, overwriting any previously existing replication -configuration on the **slave**. - - -Stopping synchronization ------------------------ - -The initial synchronization and continuous replication applier can also be started separately. -To start replication on the **slave**, make sure there currently is no replication applier running. - -The following commands stop a running applier in the slave's instance: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").globalApplier.stop(); -``` - -The *stop* operation will terminate any replication activity in the ArangoDB instance on the slave. - - -After that, the initial synchronization can be run. It will copy the collections from the **master** -to the **slave**, overwriting existing data. To run the initial synchronization, execute the following -commands on the **slave**: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").syncGlobal({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - verbose: false -}); -``` - -Username and password only need to be specified when the **master** requires authentication. -To check what the synchronization is currently doing, set the *verbose* option to *true*. -If set, the synchronization will create log messages with the current synchronization status. - -**Warning**: The *syncGlobal* command will replace data in the **slave** database with data from the -**master** database! Only execute these commands if you have verified you are on the correct server, -in the correct database! - -The sync operation will return an attribute named *lastLogTick* which we'll need to note. The -last log tick will be used as the starting point for subsequent replication activity. Let's -assume we got the following last log tick: - -```js -{ - "lastLogTick" : "40694126", - ... -} -``` - -Initial synchronization from the ArangoShell -------------------------------------------- - -The initial synchronization via the *syncGlobal* command may take a long time to complete. The shell -will block until the slave has completed the initial synchronization or until an error occurs.
-By default, the *syncGlobal* command in the ArangoShell will poll the slave for a status update every -10 seconds. - -Optionally the *syncGlobal* command can be made non-blocking by setting its *async* option to true. -In this case, the *syncGlobal command* will return instantly with an id string, and the initial -synchronization will run detached on the master. To fetch the current status of the *syncGlobal* -progress from the ArangoShell, the *getSyncResult* function can be used as follows: - -```js -db._useDatabase("_system"); -var replication = require("@arangodb/replication"); - -/* run command in async mode */ -var id = replication.syncGlobal({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - async: true -}); - -/* now query the status of our operation */ -print(replication.getSyncResult(id)); -``` - -*getSyncResult* will return *false* as long as the synchronization is not complete, and return the -synchronization result otherwise. - - -Continuous synchronization --------------------------- - -When the initial synchronization is finished, the continuous replication applier can be started using -the last log tick provided by the *syncGlobal* command. Before starting it, there is at least one -configuration option to consider: replication on the **slave** will be running until the -**slave** gets shut down. When the slave server gets restarted, replication will be turned off again. -To change this, we first need to configure the slave's replication applier and set its -*autoStart* attribute. - -Here's the command to configure the replication applier with several options, including the -*autoStart* attribute: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").globalApplier.properties({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - autoStart: true, - autoResync: true, - autoResyncRetries: 2, - adaptivePolling: true, - includeSystem: false, - requireFromPresent: false, - idleMinWaitTime: 0.5, - idleMaxWaitTime: 1.5, - verbose: false -}); -``` - -An important consideration for replication is whether data from system collections (such as -*_graphs* or *_users*) should be applied. The *includeSystem* option controls that. If set to -*true*, changes in system collections will be replicated. Otherwise, they will not be replicated. -It is often not necessary to replicate data from system collections, especially because it may -lead to confusion on the slave because the slave needs to have its own system collections in -order to start and keep operational. - -{% hint 'warning' %} -There is a separate option *includeFoxxQueues* for controlling whether Foxx queue jobs from the system -collections `_jobs` and `_queues` collections should be replicated. Documents from these collections -are not replicated by default in order to avoid execution of Foxx queue jobs on the slave. -{% endhint %} - -The *requireFromPresent* attribute controls whether the applier will start synchronizing in case -it detects that the master cannot provide data for the initial tick value provided by the slave. -This may be the case if the master does not have a big enough backlog of historic WAL logfiles, -and when the replication is re-started after a longer pause. When *requireFromPresent* is set to -*true*, then the replication applier will check at start whether the start tick from which it starts -or resumes replication is still present on the master. If not, then there would be data loss. 
If -*requireFromPresent* is *true*, the replication applier will abort with an appropriate error message. -If set to *false*, then the replication applier will still start, and ignore the data loss. - -The *autoResync* option can be used in conjunction with the *requireFromPresent* option as follows: -when both *requireFromPresent* and *autoResync* are set to *true* and the master cannot provide the -log data the slave had requested, the replication applier will stop as usual. But due to the fact -that *autoResync* is set to *true*, the slave will automatically trigger a full resync of all data with -the master. After that, the replication applier will go into continuous replication mode again. -Additionally, setting *autoResync* to *true* will trigger a full re-synchronization of data when -the continuous replication is started and detects that there is no start tick value. - -Note that automatic re-synchronization (*autoResync* option set to *true*) may transfer a lot of -data from the master to the slave and can therefore be expensive. Still it's turned on here so -there's less need for manual intervention. - -The *autoResyncRetries* option can be used to control the number of resynchronization retries that -will be performed in a row when automatic resynchronization is enabled and kicks in. Setting this to -*0* will effectively disable *autoResync*. Setting it to some other value will limit the number of retries -that are performed. This helps prevent endless retries in case resynchronizations always fail. - -Now it's time to start the replication applier on the slave using the last log tick we got -before: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").globalApplier.start("40694126"); -``` - -This will replicate all operations happening in the master's system database and apply them -on the slave, too. - -After that, you should be able to monitor the state and progress of the replication -applier by executing the *state* command on the slave server: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").globalApplier.state(); -``` - -Please note that stopping the replication applier on the slave using the *stop* command -should be avoided. The reason is that currently ongoing transactions (that have partly been -replicated to the slave) will need to be restarted after a restart of the replication -applier. Stopping and restarting the replication applier on the slave should thus only be -performed if there is certainty that the master is currently fully idle and all transactions -have been replicated fully. - -Note that while a slave has only partly executed a transaction from the master, it might keep -a write lock on the collections involved in the transaction. - -You may also want to check the master and slave states via the HTTP APIs -(see [HTTP Interface for Replication](../../../HTTP/Replications/index.html)). - diff --git a/Documentation/Books/Manual/Administration/MasterSlave/SettingUp.md b/Documentation/Books/Manual/Administration/MasterSlave/SettingUp.md deleted file mode 100644 index b2ebb710fb9b..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/SettingUp.md +++ /dev/null @@ -1,4 +0,0 @@ -Setting up Replication in a _Master/Slave_ environment -====================================================== - -This _Section_ includes information on how to set up a _Master/Slave_ environment.
\ No newline at end of file diff --git a/Documentation/Books/Manual/Administration/MasterSlave/SyncingCollections.md b/Documentation/Books/Manual/Administration/MasterSlave/SyncingCollections.md deleted file mode 100644 index 7425d30d3e51..000000000000 --- a/Documentation/Books/Manual/Administration/MasterSlave/SyncingCollections.md +++ /dev/null @@ -1,78 +0,0 @@ -Syncing Collections -=================== - -In order to synchronize data for a single collection from a _master_ to a _slave_ instance, there -is the *syncCollection* function: - -It will fetch all documents of the specified collection from the master database and store -them in the local instance. After the synchronization, the collection data on the slave will be -identical to the data on the master, provided no further data changes happen on the master. -Any data changes that are performed on the master after the synchronization was started will -not be captured by *syncCollection*, but need to be replicated using the regular replication -applier mechanism. - -For the following example setup, we'll use the instance *tcp://master.domain.org:8529* as the -master, and the instance *tcp://slave.domain.org:8530* as a slave. - -The goal is to have all data from the collection *test* in database *_system* on master -*tcp://master.domain.org:8529* be replicated to the collection *test* in database *_system* on -the slave *tcp://slave.domain.org:8530*. - -On the **master**, the collection *test* needs to be present in the *_system* database, with -any data in it. - -To transfer this collection to the **slave**, issue the following commands there: - -```js -db._useDatabase("_system"); -require("@arangodb/replication").syncCollection("test", { - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd" -}); -``` - -**Warning**: The syncCollection command will replace the collection's data in the slave database -with data from the master database! Only execute these commands if you have verified you are on -the correct server, in the correct database! - -Setting the optional *incremental* attribute in the call to *syncCollection* will start an -incremental transfer of data. This may be useful in case when the slave already has -parts or almost all of the data in the collection and only the differences need to be -synchronized. Note that to compute the differences the incremental transfer will build a sorted -list of all document keys in the collection on both the slave and the master, which may still be -expensive for huge collections in terms of memory usage and runtime. During building the list -of keys the collection will be read-locked on the master. - -The *initialSyncMaxWaitTime* attribute in the call to *syncCollection* controls how long the -slave will wait for a master's response. This wait time can be used to control after what time -the synchronization will give up and fail. - -The *syncCollection* command may take a long time to complete if the collection is big. The shell -will block until the slave has synchronized the entire collection from the master or until an -error occurs. By default, the *syncCollection* command in the ArangoShell will poll for a status -update every 10 seconds. - -When *syncCollection* is called from the ArangoShell, the optional *async* attribute can be used -to start the synchronization as a background process on the slave. If *async* is set to *true*, -the call to *syncCollection* will return almost instantly with an id string. 
Using this id string, -the status of the sync job on the slave can be queried using the *getSyncResult* function as follows: - -```js -db._useDatabase("_system"); -var replication = require("@arangodb/replication"); - -/* run command in async mode */ -var id = replication.syncCollection("test", { - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - async: true -}); - -/* now query the status of our operation */ -print(replication.getSyncResult(id)); -``` - -*getSyncResult* will return *false* as long as the synchronization is not complete, and return the -synchronization result otherwise. diff --git a/Documentation/Books/Manual/Administration/README.md b/Documentation/Books/Manual/Administration/README.md deleted file mode 100644 index 2e7ae91f3b30..000000000000 --- a/Documentation/Books/Manual/Administration/README.md +++ /dev/null @@ -1,48 +0,0 @@ -Administration -============== - -Tools ------ - -Deployments of ArangoDB servers can be managed with the following tools: - -- [**Web interface**](../Programs/WebInterface/README.md): - [_Arangod_](../Programs/Arangod/README.md) serves a graphical web interface to - be accessed with a browser via the server port. It provides basic and advanced - functionality to interact with the server and its data. -{### TODO: In case of a cluster, the web interface can be reached via any of the coordinators. What about other deployment modes? ###} - -- **ArangoShell**: [_Arangosh_](../Programs/Arangosh/README.md) is a V8 shell to - interact with any local or remote ArangoDB server through a JavaScript - interface. It can be used to automate tasks. Some developers may prefer it over - the web interface, especially for simple CRUD. It is not to be confused with - general command lines like Bash or PowerShell. - -- **RESTful API**: _Arangod_ has an [HTTP interface](../../HTTP/index.html) through - which it can be fully managed. The official client tools including _Arangosh_ and - the Web interface talk to this bare metal interface. It is also relevant for - [driver](../../Drivers/index.html) developers. - -- [**ArangoDB Starter**](../Programs/Starter/README.md): This deployment tool - helps to start _Arangod_ instances, like for a Cluster or an Active Failover setup. - -For a full list of tools, please refer to the [Programs & Tools](../Programs/README.md) chapter. - -Deployment Administration -------------------------- - -- [Master/Slave](MasterSlave/README.md) -- [Active Failover](ActiveFailover/README.md) -- [Cluster](Cluster/README.md) -- [Datacenter to datacenter replication](DC2DC/README.md) -- [ArangoDB Starter Administration](Starter/README.md) - -Other Topics ------------- - -- [Configuration](Configuration/README.md) -- [Backup & Restore](../BackupRestore/README.md) -- [Import & Export](ImportExport.md) -- [User Management](ManagingUsers/README.md) -- [Switch Storage Engine](Engine/SwitchEngine.md) - diff --git a/Documentation/Books/Manual/Administration/Starter/README.md b/Documentation/Books/Manual/Administration/Starter/README.md deleted file mode 100644 index ba29c69d3218..000000000000 --- a/Documentation/Books/Manual/Administration/Starter/README.md +++ /dev/null @@ -1,7 +0,0 @@ - -# ArangoDB Starter Administration - -This chapter documents administering the _ArangoDB Starter_. 
- -- [Remove a machine from the cluster](./Removal.md) -- [Recover from a failed machine](./Recovery.md) diff --git a/Documentation/Books/Manual/Administration/Starter/Recovery.md b/Documentation/Books/Manual/Administration/Starter/Recovery.md deleted file mode 100644 index 3f22888e3615..000000000000 --- a/Documentation/Books/Manual/Administration/Starter/Recovery.md +++ /dev/null @@ -1,38 +0,0 @@ - -# ArangoDB Starter Recovery Procedure - -This procedure is intended to recover a cluster (that was started with the ArangoDB -_Starter_) when a machine of that cluster is broken without the possibility to recover -it (e.g. complete HD failure). In this procedure it does not matter whether a replacement -machine uses the old or a new IP address. - -To recover from this scenario, you must: - -- Create a new (replacement) machine with ArangoDB (including _Starter_) installed. -- Create a file called `RECOVERY` in the directory you are going to use as data - directory of the _Starter_ (the one that is passed via the option `--starter.data-dir`). - This file must contain the IP address and port of the _Starter_ that has been - broken (and will be replaced with this new machine). - -E.g. - -```bash -echo "192.168.1.25:8528" > $DATADIR/RECOVERY -``` - -After creating the `RECOVERY` file, start the _Starter_ using all the normal command -line arguments. - -The _Starter_ will now: - -1. Talk to the remaining _Starters_ to find the ID of the _Starter_ it replaces and - use that ID to join the remaining _Starters_. -1. Talk to the remaining _Agents_ to find the ID of the _Agent_ it replaces and - adjust the command-line arguments of the _Agent_ (that it will start) to use that ID. - This is skipped if the _Starter_ was not running an _Agent_. -1. Remove the `RECOVERY` file from the data directory. - -The cluster will now recover automatically. It will however have one more _Coordinator_ -and _DBServer_ than expected. Exactly one _Coordinator_ and one _DBServer_ will -be listed "red" in the web UI of the database. They will have to be removed manually -using the ArangoDB Web UI. diff --git a/Documentation/Books/Manual/Administration/Starter/Removal.md b/Documentation/Books/Manual/Administration/Starter/Removal.md deleted file mode 100644 index 1c612a329bcb..000000000000 --- a/Documentation/Books/Manual/Administration/Starter/Removal.md +++ /dev/null @@ -1,60 +0,0 @@ - -# ArangoDB Starter Removal Procedure - -This procedure is intended to remove a machine from a cluster -(that was started with the ArangoDB _Starter_). - -It is possible to run this procedure while the machine is still running -or when it has already been removed. - -It is not possible to remove machines that have an agent on them! -Use the [recovery procedure](./Recovery.md) if you have a failed machine -with an agent on it. - -Note that it is highly recommended to remove a machine while it is still running. - -To remove a machine from a cluster, run the following command: - -```bash -arangodb remove starter --starter.endpoint=<endpoint> [--starter.id=<id>] [--force] -``` - -Where `<endpoint>` is the endpoint of the starter that you want to remove, -or the endpoint of one of the remaining starters. E.g. `http://localhost:8528`. - -If you want to remove a machine that is no longer running, use the `--starter.id` -option. Set it to the ID of the ArangoDB _Starter_ on the machine that you want to remove. - -You can find this ID in a `setup.json` file in the data directory of one of -the remaining ArangoDB _Starters_. - -E.g. -```json -{ - ...
- "peers": { - "Peers": [ - { - "ID": "21e42415", - "Address": "10.21.56.123", - "Port": 8528, - "PortOffset": 0, - "DataDir": "/mydata/server1", - "HasAgent": true, - "IsSecure": false - }, - ... -} -``` - -If the machine you want to remove has address `10.21.56.123` and was listening -on port `8528`, use ID `21e42415`. - -The `remove starter` command will attempt to clean out all data from the servers -of the machine that you want to remove. -This can take a long time. -If the cleanout fails, the `remove starter` command will fail. - -If you want to remove the machine even when the cleanout has failed, use -the `--force` option. -Note that this may lead to data loss! diff --git a/Documentation/Books/Manual/Analyzers/README.md b/Documentation/Books/Manual/Analyzers/README.md deleted file mode 100644 index 4cb76f5a3933..000000000000 --- a/Documentation/Books/Manual/Analyzers/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# Analyzers powered by IResearch - -## Background - -The concept of value "analysis" refers to the process of breaking up a given -value into a set of sub-values, which are internally tied together by metadata, -which in turn influences both the search and sort stages to provide the most -appropriate match for the specified conditions, similar to queries to web -search engines. - -In plain terms this means a user can, for example: - -- request documents where the `body` attribute best matches `a quick brown fox` -- request documents where the `dna` attribute best matches a DNA sub sequence -- request documents where the `name` attribute best matches gender -- etc. (via custom analyzers) - -## What are Analyzers - -Analyzers are helpers that allow a user to parse and transform an arbitrary -value (currently only string values are supported) into zero or more resulting -values. The parsing and transformation applied is directed by the analyzer -*type* and the analyzer *properties*. - -The Analyzer implementations themselves are provided by the underlying -[IResearch library](https://github.com/iresearch-toolkit/iresearch). -Therefore their most common use case, filter condition matching, is with -[ArangoSearch Views](../Views/ArangoSearch/README.md). -However, Analyzers can be used as standalone helpers via the `TOKENS(...)` -function, allowing a user to leverage the value transformation power of the -Analyzer in any context where an AQL function can be used. - -A user-visible Analyzer is simply an alias for an underlying implementation -*type* + configuration *properties* and a set of *features*. The *features* -dictate what term matching capabilities are available and as such are only -applicable in the context of ArangoSearch Views. - -The aforementioned three configuration attributes that an Analyzer is composed -of are given a simple *name* that can be used to reference the said Analyzer. -Thus an analyzer definition is composed of the following attributes: - -- *name*: the analyzer name -- *type*: the analyzer type -- *properties*: the properties used to configure the specified type -- *features*: the set of features to set on the analyzer generated fields - -The valid values for *type* are any of the available Analyzer types. - -The valid values for the *properties* are dependent on what *type* is used. For -example for the *text* type its property may simply be an object with the value -`"locale": "en"`, whereas for the "delimited" type its property may simply be -the delimiter `,`.
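Put together, two illustrative definitions for these examples might look as follows. The names *text_en* and *csv* are placeholders rather than built-in analyzers, and the *features* values are explained below:

```js
// illustrative definition objects only; names and feature sets are placeholders
var textDefinition = {
  name: "text_en",
  type: "text",
  properties: { locale: "en" },
  features: [ "frequency", "norm", "position" ]
};

var csvDefinition = {
  name: "csv",
  type: "delimited",
  properties: ",",
  features: [ "frequency" ]
};
```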
- -The valid values for the *features* are dependent on both the capabilities of -the underlying *type* and the query filtering and sorting functions that the -result can be used with. For example the *text* type will produce -*frequency* + *norm* + *position* and the `PHRASE(...)` function requires -*frequency* + *position* to be available. - -Currently the following *features* are supported: - -- *frequency*: how often a term is seen, required for PHRASE(...) -- *norm*: the field normalization factor -- *position*: sequentially increasing term position, required for PHRASE(...) - if present then the *frequency* feature is also required - -## Analyzer usage - -For Analyzer usage in the context of ArangoSearch Views please see the section -[ArangoSearch Views](../Views/ArangoSearch/README.md). - -The value transformation capabilities of a given analyzer can be invoked via -the `TOKENS(...)` function to, for example: -- break up a string of words into individual words, while also optionally - filtering out stopwords, applying case conversion and extracting word stems -- parse CSV/TSV or other delimiter-encoded string values into individual fields - -The signature of the `TOKENS(...)` function is: - - TOKENS(<input>, <analyzer>) - -It currently accepts any string value, and an analyzer name, and will produce -an array of zero or more tokens generated by the specified analyzer -transformation. - -## Analyzer management - -The following operations are exposed via JavaScript and REST APIs for analyzer -management: - -- *create*: creation of a new analyzer definition -- *get*: retrieve an existing analyzer definition -- *list*: retrieve a listing of all available analyzer definitions -- *remove*: remove an analyzer definition - -### JavaScript - -The JavaScript API is accessible via the `@arangodb/analyzers` endpoint from -both server-side and client-side code, e.g. - -```js -var analyzers = require("@arangodb/analyzers"); -``` - -The *create* operation is accessible via: - -```js -analyzers.save(<name>, <type>[, <properties>[, <features>]]) -``` - -… where *properties* can be represented either as a string, an object or a null -value and *features* is an array of string-encoded feature names.
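For example, a *text* Analyzer could be created and then used standalone via `TOKENS(...)` like this. The name *text_en* is a placeholder, and the exact set of required properties depends on the Analyzer type:

```js
var analyzers = require("@arangodb/analyzers");
var db = require("@arangodb").db;

// create a "text" analyzer; "text_en" and its properties are placeholders
var analyzer = analyzers.save("text_en", "text", { locale: "en" },
                              [ "frequency", "norm", "position" ]);

// use it standalone to split a string into tokens
db._query('RETURN TOKENS("a quick brown fox", "text_en")').toArray();
```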
- -The *get* operation is accessible via: - -```js -analyzers.analyzer(<name>) -``` - -The *list* operation is accessible via: - -```js -analyzers.toArray() -``` - -The *remove* operation is accessible via: - -```js -analyzers.remove(<name> [, <force>]) -``` - -Additionally individual analyzer instances expose getter accessors for the -aforementioned definition attributes: - -```js -analyzer.name() -analyzer.type() -analyzer.properties() -analyzer.features() -``` - -### RESTful API - -The *create* operation is accessible via the *POST* method on the URL: - - /_api/analyzer - -With the Analyzer configuration passed via the body as an object with -attributes: - -- *name*: string (required) -- *type*: string (required) -- *properties*: string or object or null (optional) default: `null` -- *features*: array of strings (optional) default: empty array - -The *get* operation is accessible via the *GET* method on the URL: - - /_api/analyzer/{analyzer-name} - -A successful result will be an object with the fields: -- *name* -- *type* -- *properties* -- *features* - -The *list* operation is accessible via the *GET* method on the URL: - - /_api/analyzer - -A successful result will be an array of objects with the fields: -- *name* -- *type* -- *properties* -- *features* - -The *remove* operation is accessible via the *DELETE* method on the URL: - - /_api/analyzer/{analyzer-name}[?force=true] - -Also see [Analyzers](../../HTTP/Analyzers/index.html) in the HTTP book -including a list of available [Analyzer Types](../../HTTP/Analyzers/index.html#analyzer-types). diff --git a/Documentation/Books/Manual/Appendix/Deprecated/README.md b/Documentation/Books/Manual/Appendix/Deprecated/README.md deleted file mode 100644 index 347115f30698..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Deprecated -========== - -Features listed in this section should no longer be used, because they are -considered obsolete and may get removed in a future release. They are currently -kept for backward compatibility. There are usually better alternatives to -replace the old features with: - -- **Simple Queries**: Idiomatic interface in arangosh to perform trivial queries. - They are superseded by [AQL queries](../../../AQL/index.html), which can also - be run in arangosh. AQL is a language on its own and way more powerful than - *Simple Queries* could ever be. In fact, in recent versions the (still supported) - *Simple Queries* are translated internally to AQL, and the AQL query is then - optimized and run against the database, because of better performance and - reduced maintenance complexity. - -- **Actions**: Snippets of JavaScript code on the server-side for minimal - custom endpoints. Since the Foxx revamp in 3.0, it has become really easy to - write [Foxx Microservices](../../Foxx/README.md), which allow you to define - custom endpoints even with complex business logic. - - From v3.5.0 on, the system collections `_routing` and `_modules` are not - created anymore when the `_system` database is first created (blank new data - folder). They are not actively removed, they remain on upgrade or backup - restoration from previous versions. - - You can still find the - [Actions documentation](https://docs.arangodb.com/3.4/Manual/Appendix/Deprecated/Actions/) - in 3.4 or older versions of the documentation.
diff --git a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/FulltextQueries.md b/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/FulltextQueries.md deleted file mode 100644 index 0335421c3961..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/FulltextQueries.md +++ /dev/null @@ -1,112 +0,0 @@ -Fulltext queries -================ - -{% hint 'warning' %} -It is recommended to use AQL instead, see [**Fulltext functions**](../../../../AQL/Functions/Fulltext.html). -{% endhint %} - -ArangoDB allows to run queries on text contained in document attributes. To use -this, a [fulltext index](../../Glossary.md#fulltext-index) must be defined for the attribute of the collection that -contains the text. Creating the index will parse the text in the specified -attribute for all documents of the collection. Only documents will be indexed -that contain a textual value in the indexed attribute. For such documents, the -text value will be parsed, and the individual words will be inserted into the -fulltext index. - -When a fulltext index exists, it can be queried using a fulltext query. - -Fulltext --------- - - - - -queries the fulltext index -`collection.fulltext(attribute, query)` - -The *fulltext* simple query functions performs a fulltext search on the specified -*attribute* and the specified *query*. - -Details about the fulltext query syntax can be found below. - -Note: the *fulltext* simple query function is **deprecated** as of ArangoDB 2.6. -The function may be removed in future versions of ArangoDB. The preferred -way for executing fulltext queries is to use an AQL query using the *FULLTEXT* -[AQL function](../../../../AQL/Functions/Fulltext.html) as follows: - - FOR doc IN FULLTEXT(@@collection, @attributeName, @queryString, @limit) - RETURN doc - - -**Examples** - - - @startDocuBlockInline collectionFulltext - @EXAMPLE_ARANGOSH_OUTPUT{collectionFulltext} - ~ db._drop("emails"); - ~ db._create("emails"); - db.emails.ensureFulltextIndex("content"); - | db.emails.save({ content: - "Hello Alice, how are you doing? Regards, Bob"}); - | db.emails.save({ content: - "Hello Charlie, do Alice and Bob know about it?"}); - db.emails.save({ content: "I think they don't know. Regards, Eve" }); - db.emails.fulltext("content", "charlie,|eve").toArray(); - ~ db._drop("emails"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionFulltext - - -Syntax ------- - -In the simplest form, a fulltext query contains just the sought word. If -multiple search words are given in a query, they should be separated by commas. -All search words will be combined with a logical AND by default, and only such -documents will be returned that contain all search words. This default behavior -can be changed by providing the extra control characters in the fulltext query, -which are: - -* *+*: logical AND (intersection) -* *|*: logical OR (union) -* *-*: negation (exclusion) - -*Examples:* - -* *"banana"*: searches for documents containing "banana" -* *"banana,apple"*: searches for documents containing both "banana" *AND* "apple" -* *"banana,|orange"*: searches for documents containing either "banana" *OR* "orange" *OR* both -* *"banana,-apple"*: searches for documents that contains "banana" but *NOT* "apple". - -Logical operators are evaluated from left to right. - -Each search word can optionally be prefixed with *complete*: or *prefix*:, with -*complete*: being the default. This allows searching for complete words or for -word prefixes. 
Suffix searches or any other forms are partial-word matching are -currently not supported. - -Examples: - -* *"complete:banana"*: searches for documents containing the exact word "banana" -* *"prefix:head"*: searches for documents with words that start with prefix "head" -* *"prefix:head,banana"*: searches for documents contain words starting with prefix - "head" and that also contain the exact word "banana". - -Complete match and prefix search options can be combined with the logical -operators. - -Please note that only words with a minimum length will get indexed. This minimum -length can be defined when creating the fulltext index. For words tokenization, -the libicu text boundary analysis is used, which takes into account the default -as defined at server startup (*--server.default-language* startup -option). Generally, the word boundary analysis will filter out punctuation but -will not do much more. - -Especially no word normalization, stemming, or similarity analysis will be -performed when indexing or searching. If any of these features is required, it -is suggested that the user does the text normalization on the client side, and -provides for each document an extra attribute containing just a comma-separated -list of normalized words. This attribute can then be indexed with a fulltext -index, and the user can send fulltext queries for this index, with the fulltext -queries also containing the stemmed or normalized versions of words as required -by the user. diff --git a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/GeoQueries.md b/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/GeoQueries.md deleted file mode 100644 index 26597884cc9d..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/GeoQueries.md +++ /dev/null @@ -1,66 +0,0 @@ -Geo Queries -=========== - -{% hint 'warning' %} -It is recommended to use AQL instead, see [**Geo functions**](../../../../AQL/Functions/Geo.html). -{% endhint %} - -The ArangoDB allows to select documents based on geographic coordinates. In -order for this to work, a geo-spatial index must be defined. This index will -use a very elaborate algorithm to lookup neighbors that is a magnitude faster -than a simple R* index. - -In general a geo coordinate is a pair of latitude and longitude, which must -both be specified as numbers. A geo index can be created on coordinates that -are stored in a single list attribute with two elements like *[-10, +30]* -(latitude first, followed by longitude) or on coordinates stored in two -separate attributes. - -For example, to index the following documents, an index can be created on the -*position* attribute of the documents: - - db.test.save({ position: [ -10, 30 ] }); - db.test.save({ position: [ 10, 45.5 ] }); - - db.test.ensureIndex({ type: "geo", fields: [ "position" ] }); - -If coordinates are stored in two distinct attributes, the index must be created -on the two attributes: - - db.test.save({ latitude: -10, longitude: 30 }); - db.test.save({ latitude: 10, longitude: 45.5 }); - - db.test.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] }); - -In order to find all documents within a given radius around a coordinate use -the *within* operator. In order to find all documents near a given document -use the *near* operator. - -It is possible to define more than one geo-spatial index per collection. In -this case you must give a hint using the *geo* operator which of indexes -should be used in a query. 
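Continuing the *position* example above, the operators might be used as follows. This is a sketch only; radii are given in meters:

```js
// the three documents closest to [0, 0]
db.test.near(0, 0).limit(3).toArray();

// all documents within 2000 km of [0, 0] (radius in meters)
db.test.within(0, 0, 2000 * 1000).toArray();

// with more than one geo index, select the index to use via geo()
db.test.geo("position").near(0, 0).limit(3).toArray();
```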
- -Near ---- - - -@startDocuBlock collectionNear - -Within ------ - - -@startDocuBlock collectionWithin - -Geo --- - - -@startDocuBlock collectionGeo - -Related topics -------------- - -Other ArangoDB geographic features are described in: -- [AQL Geo functions](../../../../AQL/Functions/Geo.html) -- [Geo-Spatial indexes](../../../Indexing/Geo.md) diff --git a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/ModificationQueries.md b/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/ModificationQueries.md deleted file mode 100644 index e272c3f4e1e0..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/ModificationQueries.md +++ /dev/null @@ -1,23 +0,0 @@ -Modification Queries -==================== - -{% hint 'warning' %} -It is recommended to use AQL instead, see [**Data Modification Queries**](../../../../AQL/DataQueries.html#data-modification-queries). -{% endhint %} - -ArangoDB also allows removing, replacing, and updating documents based -on an example document. Every document in the collection will be -compared against the specified example document and be deleted/replaced/ -updated if all attributes match. - -These methods should be used with caution as they are intended to remove or -modify lots of documents in a collection. - -All methods can optionally be restricted to a specific number of operations. -However, if a limit is specified but is less than the number of matches, it -will be undefined which of the matching documents will get removed/modified. -[Remove by Example](../../../DataModeling/Documents/DocumentMethods.md#remove-by-example), - [Replace by Example](../../../DataModeling/Documents/DocumentMethods.md#replace-by-example) and -[Update by Example](../../../DataModeling/Documents/DocumentMethods.md#update-by-example) - are described with examples in the subchapter -[Collection Methods](../../../DataModeling/Documents/DocumentMethods.md). \ No newline at end of file diff --git a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/Pagination.md b/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/Pagination.md deleted file mode 100644 index 486d6be8b4a3..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/Pagination.md +++ /dev/null @@ -1,121 +0,0 @@ -Pagination -========== - -{% hint 'warning' %} -It is recommended to use AQL instead, see the [**LIMIT operation**](../../../../AQL/Operations/Limit.html). -{% endhint %} - -If, for example, you display the result of a user search, then you are in -general not interested in the complete result set, but only the first 10 or so -documents. Or maybe the next 10 documents for the second page. In this case, you -can use the *skip* and *limit* operators. These operators work like LIMIT in -MySQL. - -*skip* used together with *limit* can be used to implement pagination. -The *skip* operator skips over the first n documents. So, in order to create -result pages with 10 result documents per page, you can use skip(n * -10).limit(10) to access the 10 documents on the *n*th page. This result -should be sorted, so that the pagination works in a predictable way. - -Limit ----- - - - - -limit -`query.limit(number)` - -Limits a result to the first *number* documents. Specifying a limit of -*0* will return no documents at all. If you do not need a limit, just do -not add the limit operator. The limit must be non-negative. - -In general the input to *limit* should be sorted.
Otherwise it will be -unclear which documents will be included in the result set. - - -**Examples** - - - @startDocuBlockInline queryLimit - @EXAMPLE_ARANGOSH_OUTPUT{queryLimit} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - db.five.all().toArray(); - db.five.all().limit(2).toArray(); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock queryLimit - - - -Skip ----- - - - - -skip -`query.skip(number)` - -Skips the first *number* documents. If *number* is positive, then this -number of documents are skipped before returning the query results. - -In general the input to *skip* should be sorted. Otherwise it will be -unclear which documents will be included in the result set. - -Note: using negative *skip* values is **deprecated** as of ArangoDB 2.6 and -will not be supported in future versions of ArangoDB. - - -**Examples** - - - @startDocuBlockInline querySkip - @EXAMPLE_ARANGOSH_OUTPUT{querySkip} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - db.five.all().toArray(); - db.five.all().skip(3).toArray(); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock querySkip - - -Ignore any limit with count: - - @startDocuBlockInline cursorCountUnLimited - @EXAMPLE_ARANGOSH_OUTPUT{cursorCountUnLimited} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - db.five.all().limit(2).count(); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock cursorCountUnLimited - -Counting any limit or skip: - - @startDocuBlockInline cursorCountLimit - @EXAMPLE_ARANGOSH_OUTPUT{cursorCountLimit} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - db.five.all().limit(2).count(true); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock cursorCountLimit diff --git a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/README.md b/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/README.md deleted file mode 100644 index fdc66e631e26..000000000000 --- a/Documentation/Books/Manual/Appendix/Deprecated/SimpleQueries/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Simple Queries -============== - -{% hint 'warning' %} -It is recommended to use [**AQL**](../../../../AQL/index.html) instead. -{% endhint %} - -Simple queries can be used if the query condition is straight forward, i.e., a -document reference, all documents, a query-by-example, or a simple geo query. In -a simple query you can specify exactly one collection and one query criteria. In -the following sections we describe the JavaScript shell interface for simple -queries, which you can use within the ArangoDB shell and within actions and -transactions. For other languages see the corresponding language API -documentation. - -You can find a list of queries at [Collection Methods](../../../DataModeling/Documents/DocumentMethods.md). 
\ No newline at end of file diff --git a/Documentation/Books/Manual/Appendix/ErrorCodes.md b/Documentation/Books/Manual/Appendix/ErrorCodes.md deleted file mode 100644 index def89e58bcdb..000000000000 --- a/Documentation/Books/Manual/Appendix/ErrorCodes.md +++ /dev/null @@ -1,5 +0,0 @@ -Error codes and meanings -======================== - -@startDocuBlock errorCodes - diff --git a/Documentation/Books/Manual/Appendix/Glossary.md b/Documentation/Books/Manual/Appendix/Glossary.md deleted file mode 100644 index adee1784301a..000000000000 --- a/Documentation/Books/Manual/Appendix/Glossary.md +++ /dev/null @@ -1,250 +0,0 @@ -Glossary -======== - -Collection ----------- - -A collection consists of documents. It is uniquely identified by its collection identifier. -It also has a unique name that clients should use to identify and access it. -Collections can be renamed. It will change the collection name, but not the collection identifier. -Collections contain documents of a specific type. There are currently two types: document (default) and edge. The type is specified by the user when the collection is created, and cannot be changed later. - -Collection Identifier ---------------------- - -A collection identifier identifies a collection in a database. It is a string value and is unique within the database. Up to including ArangoDB 1.1, the collection identifier has been a client's primary means to access collections. Starting with ArangoDB 1.2, clients should instead use a collection's unique name to access a collection instead of its identifier. - -ArangoDB currently uses 64bit unsigned integer values to maintain collection ids internally. When returning collection ids to clients, ArangoDB will put them into a string to ensure the collection id is not clipped by clients that do not support big integers. Clients should treat the collection ids returned by ArangoDB as -opaque strings when they store or use it locally. - -Collection Name ---------------- - -A collection name identifies a collection in a database. It is a string and is unique within the database. Unlike the collection identifier it is supplied by the creator of the collection. The collection name must consist of letters, digits, and the _ (underscore) and - (dash) characters only. Please refer to [NamingConventions](../DataModeling/NamingConventions/CollectionAndViewNames.md) for more information on valid collection names. - -Database --------- - -ArangoDB can handle multiple databases in the same server instance. Databases can be used to logically group and separate data. An ArangoDB database consists of collections and dedicated database-specific worker processes. - -A database contains its own collections (which cannot be accessed from other databases), Foxx applications, and replication loggers and appliers. Each ArangoDB database contains its own system collections (e.g. _users, _replication, ...). - -There will always be at least one database in ArangoDB. This is the default database, named _system. This database cannot be dropped, and provides special operations for creating, dropping, and enumerating databases. Users can create additional databases and give them unique names to access them later. Database management operations cannot be initiated from out of user-defined databases. - -When ArangoDB is accessed via its HTTP REST API, the database name is read from the first part of the request URI path (e.g. /_db/_system/...). 
If the request URI does not contain a database name, the database name is automatically derived from the endpoint. Please refer to [DatabaseEndpoint](../../HTTP/Database/DatabaseEndpoint.html) for more information. - -Database Name -------------- - -A single ArangoDB instance can handle multiple databases in parallel. When multiple databases are used, each database must be given a unique name. This name is used to uniquely identify a database. The default database in ArangoDB is named _system. - -The database name is a string consisting of only letters, digits and the _ (underscore) and - (dash) characters. User-defined database names must always start with a letter. Database names is case-sensitive. - -Database Organization ---------------------- - -A single ArangoDB instance can handle multiple databases in parallel. By default, there will be at least one database, which is named _system. - -Databases are physically stored in separate sub-directories underneath the database directory, which itself resides in the instance's data directory. - -Each database has its own sub-directory, named database-. The database directory contains sub-directories for the collections of the database, and a file named parameter.json. This file contains the database id and name. - -In an example ArangoDB instance which has two databases, the filesystem layout could look like this: - -``` -data/ # the instance's data directory - databases/ # sub-directory containing all databases' data - database-/ # sub-directory for a single database - parameter.json # file containing database id and name - collection-/ # directory containing data about a collection - database-/ # sub-directory for another database - parameter.json # file containing database id and name - collection-/ # directory containing data about a collection - collection-/ # directory containing data about a collection -``` - -Foxx applications are also organized in database-specific directories inside the application path. The filesystem layout could look like this: - -``` -apps/ # the instance's application directory - system/ # system applications (can be ignored) - _db/ # sub-directory containing database-specific applications - / # sub-directory for a single database - /APP # sub-directory for a single application - /APP # sub-directory for a single application - / # sub-directory for another database - /APP # sub-directory for a single application -``` - -Document --------- - -Documents in ArangoDB are JSON objects. These objects can be nested (to any depth) and may contain arrays. Each document is uniquely identified by its document handle. - -Document Etag -------------- - -The document revision (`_rev` value) enclosed in double quotes. The revision is returned by several HTTP API methods in the Etag HTTP header. - -Document Handle ---------------- - -A document handle uniquely identifies a document in the database. It is a string and consists of the collection's name and the document key (`_key` attribute) separated by /. The document handle is stored in a document's `_id` attribute. - -Document Key ------------- - -A document key is a string that uniquely identifies a document in a -given collection. It can and should be used by clients when specific -documents are searched. Document keys are stored in the `_key` attribute -of documents. The key values are automatically indexed by ArangoDB in -a collection's primary index. Thus looking up a document by its key is -regularly a fast operation. 
The `_key` value of a document is immutable -once the document has been created. - -By default, ArangoDB will auto-generate a document key if no `_key` -attribute is specified, and use the user-specified `_key` value -otherwise. - -This behavior can be changed on a per-collection level by creating -collections with the `keyOptions` attribute. - -Using `keyOptions` it is possible to disallow user-specified keys completely, or to force a specific regime for auto-generating the `_key` values. - -There are some restrictions for user-defined -keys (see -[NamingConventions for document keys](../DataModeling/NamingConventions/DocumentKeys.md)). - -Document Revision ------------------ - -@startDocuBlock documentRevision - -Edge ----- - -Edges are special documents used for connecting other documents into a graph. An edge describes the connection between two documents using the internal attributes: `_from` and `_to`. These contain document handles, namely the start-point and the end-point of the edge. - -Edge Collection ---------------- - -Edge collections are collections that store edges. - -Edge Definition ---------------- - -Edge definitions are parts of the definition of `named graphs`. They describe which edge collections connect which vertex collections. - -General Graph -------------- - -Module maintaining graph setup in the `_graphs` collection - aka `named graphs`. Configures which edge collections relate to which vertex collections. Ensures graph consistency in modification queries. - -Named Graphs ------------- - -Named graphs enforce consistency between edge collections and vertex collections, so if you remove a vertex, edges pointing to it will be removed too. - -Index ------ - -Indexes are used to allow fast access to documents in a collection. All collections have a primary index, which is the document's _key attribute. This index cannot be dropped or changed. - -Edge collections will also have an automatically created edges index, which cannot be modified. This index provides quick access to documents via the `_from` and `_to` attributes. - -Most user-land indexes can be created by defining the names of the attributes which should be indexed. Some index types allow indexing just one attribute (e.g. fulltext index) whereas other index types allow indexing multiple attributes. - -Indexing the system attribute `_id` in user-defined indexes is not supported by any index type. - -Edges Index ------------ - -An edges index is automatically created for edge collections. It contains connections between vertex documents and is invoked when the connecting edges of a vertex are queried. There is no way to explicitly create or delete edges indexes. - -Fulltext Index --------------- - -A fulltext index can be used to find words, or prefixes of words inside documents. A fulltext index can be defined on one attribute only, and will include all words contained in documents that have a textual value in the index attribute. Since ArangoDB 2.6 the index will also include words from the index attribute if the index attribute is an array of strings, or an object with string value members. - -For example, given a fulltext index on the `translations` attribute and the following documents, then searching for `лиса` using the fulltext index would return only the first document. 
Searching for the index for the exact string `Fox` would return the first two documents, and searching for `prefix:Fox` would return all three documents: - - { translations: { en: "fox", de: "Fuchs", fr: "renard", ru: "лиса" } } - { translations: "Fox is the English translation of the German word Fuchs" } - { translations: [ "ArangoDB", "document", "database", "Foxx" ] } - -If the index attribute is neither a string, an object or an array, its contents will not be indexed. When indexing the contents of an array attribute, an array member will only be included in the index if it is a string. When indexing the contents of an object attribute, an object member value will only be included in the index if it is a string. Other data types are ignored and not indexed. - -Only words with a (specifiable) minimum length are indexed. Word tokenization is done using the word boundary analysis provided by libicu, which is taking into account the selected language provided at server start. Words are indexed in their lower-cased form. The index supports complete match queries (full words) and prefix queries. - -Geo Index ---------- - -A geo index is used to find places on the surface of the earth fast. - -Index Handle ------------- - -An index handle uniquely identifies an index in the database. It is a string and consists of a collection name and an index identifier separated by /. - -Hash Index ----------- - -A hash index is used to find documents based on examples. A hash index can be created for one or multiple document attributes. - -A hash index will only be used by queries if all indexed attributes are present in the example or search query, and if all attributes are compared using the equality (== operator). That means the hash index does not support range queries. - -A unique hash index has an amortized complexity of O(1) for lookup, insert, update, and remove operations. -The non-unique hash index is similar, but amortized lookup performance is O(n), with n being the number of -index entries with the same lookup value. - -Skiplist Index --------------- - -A skiplist is a sorted index type that can be used to find ranges of documents. - - -Anonymous Graphs ----------------- - -You may use edge collections with vertex collections without the graph management facilities. However, graph consistency is not enforced by these. If you remove vertices, you have to ensure by yourselves edges pointing to this vertex are removed. Anonymous graphs may not be browsed using graph viewer in the webinterface. This may be faster in some scenarios. - -View ----- - -A view is conceptually a transformation function over documents from zero or -more collections. It is uniquely identified by its view identifier. It also has -a unique name that clients should use to identify and access it. Views can be -renamed. Renaming a view will change the view name, but not the view identifier. -The conceptual transformation function employed by a view type is implementation -specific. The type is specified by the user when the view is created, and cannot -be changed later. The following view types are currently supported: -* [`arangosearch`](../Views/ArangoSearch/README.md) - -View Identifier ---------------- - -A view identifier identifies a view in a database. It is a string value and is -unique within the database. Clients should use a view's unique name to access a -view instead of its identifier. - -ArangoDB currently uses 64bit unsigned integer values to maintain view ids -internally. 
When returning view ids to clients, ArangoDB will put them into a -string to ensure the view id is not clipped by clients that do not support big -integers. Clients should treat the view ids returned by ArangoDB as opaque -strings when they store or use them locally. - -View Name ---------- - -A view name identifies a view in a database. It is a string and is unique within -the database. Unlike the view identifier it is supplied by the creator of the -view. The view name must consist of letters, digits, and the _ (underscore) -and - (dash) characters only. Please refer to -[Naming Conventions](../DataModeling/NamingConventions/CollectionAndViewNames.md) for -more information on valid view names, which follow the same guidelines as -collection names. - -IFF ---- - -Short form of [if and only if](https://en.m.wikipedia.org/wiki/If_and_only_if). diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Actions.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Actions.md deleted file mode 100644 index 8ad89f611485..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Actions.md +++ /dev/null @@ -1,161 +0,0 @@ -Module "actions" -================ - -`const actions = require('@arangodb/actions')` - -The action module provides the infrastructure for defining low-level HTTP actions. - -If you want to define HTTP endpoints in ArangoDB you should probably use the [Foxx microservice framework](../../Foxx/README.md) instead. - -Basics ------- - -### Error message - - - -`actions.getErrorMessage(code)` - -Returns the error message for an error code. - -Standard HTTP Result Generators -------------------------------- - -`actions.defineHttp(options)` - -Defines a new action. The *options* are as follows: - -`options.url` - -The URL, which can be used to access the action. This path might contain -slashes. Note that this action will also be called, if a url is given such that -*options.url* is a prefix of the given url and no longer definition -matches. - -`options.prefix` - -If *false*, then only use the action for exact matches. The default is -*true*. - -`options.callback(request, response)` - -The request argument contains a description of the request. A request -parameter *foo* is accessible as *request.parametrs.foo*. A request -header *bar* is accessible as *request.headers.bar*. Assume that -the action is defined for the url */foo/bar* and the request url is -*/foo/bar/hugo/egon*. Then the suffix parts *[ "hugo", "egon" ]* -are availible in *request.suffix*. - -The callback must define fill the *response*. - -* *response.responseCode*: the response code -* *response.contentType*: the content type of the response -* *response.body*: the body of the response - -You can use the functions *ResultOk* and *ResultError* to easily -generate a response. - -### Result ok - - - -`actions.resultOk(req, res, code, result, headers)` - -The function defines a response. *code* is the status code to -return. *result* is the result object, which will be returned as JSON -object in the body. *headers* is an array of headers to returned. -The function adds the attribute *error* with value *false* -and *code* with value *code* to the *result*. - -### Result bad - - - -`actions.resultBad(req, res, error-code, msg, headers)` - -The function generates an error response. - -### Result not found - - - -`actions.resultNotFound(req, res, code, msg, headers)` - -The function generates an error response. 
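A minimal sketch tying the pieces above together, combining `actions.defineHttp` with the `resultOk` and `resultNotFound` generators. The URL, parameter usage, and response payload are made up for illustration only:

```js
const actions = require('@arangodb/actions');

actions.defineHttp({
  url: 'my/echo',      // action is reachable under /my/echo
  prefix: true,        // also match /my/echo/... (extra parts end up in request.suffix)
  callback: function (req, res) {
    if (req.suffix.length === 0) {
      // no extra path segments: answer with a simple JSON document
      actions.resultOk(req, res, 200, { echo: req.parameters });
    } else {
      // unknown sub-path: generate a not-found error response
      actions.resultNotFound(req, res, 404, 'no such resource');
    }
  }
});
```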
- -### Result unsupported - - - -`actions.resultUnsupported(req, res, headers)` - -The function generates an error response. - -### Result error - - - -*actions.resultError(*req*, *res*, *code*, *errorNum*, - *errorMessage*, *headers*, *keyvals)* - -The function generates an error response. The response body is an array -with an attribute *errorMessage* containing the error message -*errorMessage*, *error* containing *true*, *code* containing -*code*, *errorNum* containing *errorNum*, and *errorMessage* -containing the error message *errorMessage*. *keyvals* are mixed -into the result. - -### Result not Implemented - - - -`actions.resultNotImplemented(req, res, msg, headers)` - -The function generates an error response. - -### Result permanent redirect - - - -`actions.resultPermanentRedirect(req, res, options, headers)` - -The function generates a redirect response. - -### Result temporary redirect - - - -`actions.resultTemporaryRedirect(req, res, options, headers)` - -The function generates a redirect response. - -ArangoDB Result Generators --------------------------- - -### Collection not found - - - -`actions.collectionNotFound(req, res, collection, headers)` - -The function generates an error response. - -### Index not found - - - -`actions.indexNotFound(req, res, collection, index, headers)` - -The function generates an error response. - -### Result exception - - - -`actions.resultException(req, res, err, headers, verbose)` - -The function generates an error response. If @FA{verbose} is set to -*true* or not specified (the default), then the error stack trace will -be included in the error message if available. If @FA{verbose} is a string -it will be prepended before the error message and the stacktrace will also -be included. diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/ArangoDB.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/ArangoDB.md deleted file mode 100644 index 55c66a020a71..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/ArangoDB.md +++ /dev/null @@ -1,226 +0,0 @@ -ArangoDB Module -=============== - -`const arangodb = require('@arangodb')` - -**Note**: This module should not be confused with the [`arangojs` JavaScript driver](https://github.com/arangodb/arangojs) which can be used to access ArangoDB from outside the database. Although the APIs share similarities and the functionality overlaps, the two are not compatible with each other and can not be used interchangeably. - -The `db` object ---------------- - -`arangodb.db` - -The `db` object represents the current database and lets you access collections and run queries. For more information see the [db object reference](../References/DBObject.html). - -**Examples** - -```js -const {db} = require('@arangodb'); - -const thirteen = db._query('RETURN 5 + 8').next(); -``` - -The `aql` template tag ----------------------- - -`arangodb.aql` - -The `aql` function is a JavaScript template string handler (or template tag). -It can be used to write complex AQL queries as multi-line strings without -having to worry about bindVars and the distinction between collections -and regular parameters. - -To use it just prefix a JavaScript template string (the ones with backticks -instead of quotes) with its import name (e.g. `aql`) and pass in variables -like you would with a regular template string. The string will automatically -be converted into an object with `query` and `bindVars` attributes which you -can pass directly to `db._query` to execute. 
If you pass in a collection it -will be automatically recognized as a collection reference -and handled accordingly. - -Starting with ArangoDB 3.4 queries generated using the `aql` template tag can -be used inside other `aql` template strings, allowing arbitrary nesting. Bind -parameters of nested queries will be merged automatically. - -To find out more about AQL see the [AQL documentation](../../../AQL/index.html). - -**Examples** - -```js - -const filterValue = 23; -const mydata = db._collection('mydata'); -const result = db._query(aql` - FOR d IN ${mydata} - FILTER d.num > ${filterValue} - RETURN d -`).toArray(); - -// nested queries - -const color = "green"; -const filterByColor = aql`FILTER d.color == ${color}'`; -const result2 = db._query(aql` - FOR d IN ${mydata} - ${filterByColor} - RETURN d -`).toArray(); -``` - -The `aql.literal` helper ------------------------- - -`arangodb.aql.literal` - -The `aql.literal` helper can be used to mark strings to be inlined into an AQL -query when using the `aql` template tag, rather than being treated as a bind -parameter. - -{% hint 'danger' %} -Any value passed to `aql.literal` will be treated as part of the AQL query. -To avoid becoming vulnerable to AQL injection attacks you should always prefer -nested `aql` queries if possible. -{% endhint %} - -**Examples** - -```js -const {aql} = require('@arangodb'); - -const filterGreen = aql.literal('FILTER d.color == "green"'); -const result = db._query(aql` - FOR d IN ${mydata} - ${filterGreen} - RETURN d -`).toArray(); -``` - -The `aql.join` helper ---------------------- - -`arangodb.aql.join` - -The `aql.join` helper takes an array of queries generated using the `aql` tag -and combines them into a single query. The optional second argument will be -used as literal string to combine the queries. - -```js -const {aql} = require('@arangodb'); - -// Basic usage -const parts = [aql`FILTER`, aql`x`, aql`%`, aql`2`]; -const joined = aql.join(parts); // aql`FILTER x % 2` - -// Merge without the extra space -const parts = [aql`FIL`, aql`TER`]; -const joined = aql.join(parts, ''); // aql`FILTER`; - -// Real world example: translate keys into document lookups -const users = db._collection("users"); -const keys = ["abc123", "def456"]; -const docs = keys.map(key => aql`DOCUMENT(${users}, ${key})`); -const aqlArray = aql`[${aql.join(docs, ", ")}]`; -const result = db._query(aql` - FOR d IN ${aqlArray} - RETURN d -`).toArray(); -// Query: -// FOR d IN [DOCUMENT(@@value0, @value1), DOCUMENT(@@value0, @value2)] -// RETURN d -// Bind parameters: -// @value0: "users" -// value1: "abc123" -// value2: "def456" - -// Alternative without `aql.join` -const users = db._collection("users"); -const keys = ["abc123", "def456"]; -const result = db._query(aql` - FOR key IN ${keys} - LET d = DOCUMENT(${users}, key) - RETURN d -`).toArray(); -// Query: -// FOR key IN @value0 -// LET d = DOCUMENT(@@value1, key) -// RETURN d -// Bind parameters: -// value0: ["abc123", "def456"] -// @value1: "users" -``` - -The `query` helper ------------------- - -`arangodb.query` - -In most cases you will likely use the `aql` template handler to create a query you directly pass to -`db._query`. 
To make this even easier ArangoDB provides the `query` template handler, which behaves -exactly like `aql` but also directly executes the query and returns the result cursor instead of -the query object: - -```js -const {query} = require('@arangodb'); - -const filterValue = 23; -const mydata = db._collection('mydata'); -const result = query` - FOR d IN ${mydata} - FILTER d.num > ${filterValue} - RETURN d -`.toArray(); - -// Nesting with `aql` works as expected -const {aql} = require('@arangodb'); - -const filter = aql`FILTER d.num > ${filterValue}`; -const result2 = query` - FOR d IN ${mydata} - ${filter} - RETURN d -`.toArray(); -``` - -The `errors` object -------------------- - -`arangodb.errors` - -This object provides useful objects for each error code ArangoDB might use in `ArangoError` errors. This is helpful when trying to catch specific errors raised by ArangoDB, e.g. when trying to access a document that does not exist. Each object has a `code` property corresponding to the `errorNum` found on `ArangoError` errors. - -For a complete list of the error names and codes you may encounter see the [appendix on error codes](../ErrorCodes.md). - -**Examples** - -```js -const errors = require('@arangodb').errors; - -try { - someCollection.document('does-not-exist'); -} catch (e) { - if (e.isArangoError && e.errorNum === errors.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) { - throw new Error('Document does not exist'); - } - throw new Error('Something went wrong'); -} -``` - -The `time` function -------------------- - -`arangodb.time` - -This function provides the current time in seconds as a floating point value with microsecond precisison. - -This function can be used instead of `Date.now()` when additional precision is needed. - -**Examples** - -```js -const time = require('@arangodb').time; - -const start = time(); -db._query(someVerySlowQuery); -console.log(`Elapsed time: ${time() - start} secs`); -``` - diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Console.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Console.md deleted file mode 100644 index fa5fadd2174b..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Console.md +++ /dev/null @@ -1,187 +0,0 @@ -Console Module -============== - -`global.console === require('console')` - -**Note**: You don't need to load this module directly. The `console` object is globally defined throughout ArangoDB and provides access to all functions in this module. - -console.assert --------------- - -`console.assert(expression, format, argument1, ...)` - -Tests that an expression is *true*. If not, logs a message and throws -an exception. - -*Examples* - -```js -console.assert(value === "abc", "expected: value === abc, actual:", value); -``` - -console.debug -------------- - -`console.debug(format, argument1, ...)` - -Formats the arguments according to *format* and logs the result as -debug message. Note that debug messages will only be logged if the -server is started with log levels *debug* or *trace*. - -String substitution patterns, which can be used in *format*. - -* *%%s* string -* *%%d*, *%%i* integer -* *%%f* floating point number -* *%%o* object hyperlink - -*Examples* - -```js -console.debug("%s", "this is a test"); -``` - -console.dir ------------ - -`console.dir(object)` - -Logs a listing of all properties of the object. 
- -Example usage: -```js -console.dir(myObject); -``` - -console.error -------------- - -`console.error(format, argument1, ...)` - -Formats the arguments according to @FA{format} and logs the result as -error message. - -String substitution patterns, which can be used in *format*. - -* *%%s* string -* *%%d*, *%%i* integer -* *%%f* floating point number -* *%%o* object hyperlink - -Example usage: -```js -console.error("error '%s': %s", type, message); -``` -console.getline ---------------- - -`console.getline()` - -Reads in a line from the console and returns it as string. - -console.group -------------- - -`console.group(format, argument1, ...)` - -Formats the arguments according to *format* and logs the result as -log message. Opens a nested block to indent all future messages -sent. Call *groupEnd* to close the block. Representation of block -is up to the platform, it can be an interactive block or just a set of -indented sub messages. - -Example usage: - -```js -console.group("user attributes"); -console.log("name", user.name); -console.log("id", user.id); -console.groupEnd(); -``` - -console.groupCollapsed ----------------------- - -`console.groupCollapsed(format, argument1, ...)` - -Same as *console.group*. - -console.groupEnd ----------------- - -`console.groupEnd()` - -Closes the most recently opened block created by a call to *group*. - -console.info ------------- - -`console.info(format, argument1, ...)` - -Formats the arguments according to *format* and logs the result as -info message. - -String substitution patterns, which can be used in *format*. - -* *%%s* string -* *%%d*, *%%i* integer -* *%%f* floating point number -* *%%o* object hyperlink - -Example usage: -```js -console.info("The %s jumped over %d fences", animal, count); -``` -console.log ------------ - -`console.log(format, argument1, ...)` - -Formats the arguments according to *format* and logs the result as -log message. This is an alias for *console.info*. - -console.time ------------- - -`console.time(name)` - -Creates a new timer under the given name. Call *timeEnd* with the -same name to stop the timer and log the time elapsed. - -Example usage: - -```js -console.time("mytimer"); -... -console.timeEnd("mytimer"); // this will print the elapsed time -``` - -console.timeEnd ---------------- - -`console.timeEnd(name)` - -Stops a timer created by a call to *time* and logs the time elapsed. - -console.trace -------------- - -`console.trace()` - -Logs a stack trace of JavaScript execution at the point where it is -called. - -console.warn ------------- - -`console.warn(format, argument1, ...)` - -Formats the arguments according to *format* and logs the result as -warn message. - -String substitution patterns, which can be used in *format*. - -* *%%s* string -* *%%d*, *%%i* integer -* *%%f* floating point number -* *%%o* object hyperlink diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Crypto.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Crypto.md deleted file mode 100644 index 13b5f5ffe5c0..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Crypto.md +++ /dev/null @@ -1,347 +0,0 @@ -Crypto Module -============= - -`const crypto = require('@arangodb/crypto')` - -The crypto module provides implementations of various hashing algorithms as well as cryptography related functions. - -Nonces ------- - -These functions deal with cryptographic nonces. - -### createNonce - -`crypto.createNonce(): string` - -Creates a cryptographic nonce. - -Returns the created nonce. 
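A nonce is typically generated on the server, handed to a client (for example embedded in a form), and later consumed exactly once using `checkAndMarkNonce`, which is described in the next section. A purely illustrative sketch:

```js
const crypto = require('@arangodb/crypto');

// generate a fresh nonce to hand out to a client
const nonce = crypto.createNonce();

// ... later, when the client sends the nonce back, it can be
// validated and consumed with crypto.checkAndMarkNonce(nonce)
```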
- -### checkAndMarkNonce - -`crypto.checkAndMarkNonce(nonce): void` - -Checks and marks a nonce. - -**Arguments** - -* **nonce**: `string` - - The nonce to check and mark. - -Returns nothing. - -Random values -------------- - -The following functions deal with generating random values. - -### rand - -`crypto.rand(): number` - -Generates a random integer that may be positive, negative or even zero. - -Returns the generated number. - -### genRandomAlphaNumbers - -`crypto.genRandomAlphaNumbers(length): string` - -Generates a string of random alpabetical characters and digits. - -**Arguments** - -* **length**: `number` - - The length of the string to generate. - -Returns the generated string. - -### genRandomNumbers - -`crypto.genRandomNumbers(length): string` - -Generates a string of random digits. - -**Arguments** - -* **length**: `number` - - The length of the string to generate. - -Returns the generated string. - -### genRandomSalt - -`crypto.genRandomSalt(length): string` - -Generates a string of random (printable) ASCII characters. - -**Arguments** - -* **length**: `number` - - The length of the string to generate. - -Returns the generated string. - -### genRandomBytes - -`crypto.genRandomBytes(length): Buffer` - -Generates a buffer of random bytes. - -**Arguments** - -* **length**: `number` - - The length of the buffer to generate. - -Returns the generated buffer. - -### uuidv4 - -`crypto.uuidv4(): string` - -Generates a random UUID v4 string. - -Returns the generated UUID string. - -JSON Web Tokens (JWT) ---------------------- - -These methods implement the JSON Web Token standard. - -### jwtEncode - -`crypto.jwtEncode(key, message, algorithm): string` - -Generates a JSON Web Token for the given message. - -**Arguments** - -* **key**: `string | null` - - The secret cryptographic key to be used to sign the message using the given algorithm. - Note that this function will raise an error if the key is omitted but the algorithm expects a key, - and also if the algorithm does not expect a key but a key is provided (e.g. when using `"none"`). - -* **message**: `string` - - Message to be encoded as JWT. Note that the message will only be base64-encoded and signed, not encrypted. - Do not store sensitive information in tokens unless they will only be handled by trusted parties. - -* **algorithm**: `string` - - Name of the algorithm to use for signing the message, e.g. `"HS512"`. - -Returns the JSON Web Token. - -### jwtDecode - -`crypto.jwtDecode(key, token, noVerify): string | null` - -**Arguments** - -* **key**: `string | null` - - The secret cryptographic key that was used to sign the message using the algorithm indicated by the token. - Note that this function will raise an error if the key is omitted but the algorithm expects a key. - - If the algorithm does not expect a key but a key is provided, the token will fail to verify. - -* **token**: `string` - - The token to decode. - - Note that the function will raise an error if the token is malformed (e.g. does not have exactly three segments). - -* **noVerify**: `boolean` (Default: `false`) - - Whether verification should be skipped. If this is set to `true` the signature of the token will not be verified. - Otherwise the function will raise an error if the signature can not be verified using the given key. - -Returns the decoded JSON message or `null` if no token is provided. - -### jwtAlgorithms - -A helper object containing the supported JWT algorithms. 
Each attribute name corresponds to a JWT `alg` and the value is an object with `sign` and `verify` methods. - -### jwtCanonicalAlgorithmName - -`crypto.jwtCanonicalAlgorithmName(name): string` - -A helper function that translates a JWT `alg` value found in a JWT header into the canonical name of the algorithm in `jwtAlgorithms`. Raises an error if no algorithm with a matching name is found. - -**Arguments** - -* **name**: `string` - - Algorithm name to look up. - -Returns the canonical name for the algorithm. - -Hashing algorithms ------------------- - -### md5 - -`crypto.md5(message): string` - -Hashes the given message using the MD5 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -### sha1 - -`crypto.sha1(message): string` - -Hashes the given message using the SHA-1 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -### sha224 - -`crypto.sha224(message): string` - -Hashes the given message using the SHA-224 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -### sha256 - -`crypto.sha256(message): string` - -Hashes the given message using the SHA-256 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -### sha384 - -`crypto.sha384(message): string` - -Hashes the given message using the SHA-384 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -### sha512 - -`crypto.sha512(message): string` - -Hashes the given message using the SHA-512 algorithm. - -**Arguments** - -* **message**: `string` - - The message to hash. - -Returns the cryptographic hash. - -Miscellaneous -------------- - -### constantEquals - -`crypto.constantEquals(str1, str2): boolean` - -Compares two strings. -This function iterates over the entire length of both strings -and can help making certain timing attacks harder. - -**Arguments** - -* **str1**: `string` - - The first string to compare. - -* **str2**: `string` - - The second string to compare. - -Returns `true` if the strings are equal, `false` otherwise. - -### pbkdf2 - -`crypto.pbkdf2(salt, password, iterations, keyLength): string` - -Generates a PBKDF2-HMAC-SHA1 hash of the given password. - -**Arguments** - -* **salt**: `string` - - The cryptographic salt to hash the password with. - -* **password**: `string` - - The message or password to hash. - -* **iterations**: `number` - - The number of iterations. - This should be a very high number. - OWASP recommended 64000 iterations in 2012 and recommends doubling that number every two years. - - When using PBKDF2 for password hashes it is also recommended to add a random value - (typically between 0 and 32000) to that number that is different for each user. - -* **keyLength**: `number` - - The key length. - -Returns the cryptographic hash. - -### hmac - -`crypto.hmac(key, message, algorithm): string` - -Generates an HMAC hash of the given message. - -**Arguments** - -* **key**: `string` - - The cryptographic key to use to hash the message. - -* **message**: `string` - - The message to hash. - -* **algorithm**: `string` - - The name of the algorithm to use. - -Returns the cryptographic hash. 
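Putting several of the functions above together, here is a minimal password-hashing sketch using `genRandomSalt`, `pbkdf2`, and `constantEquals`. The password string and key length are illustrative values, not recommendations from this text:

```js
const crypto = require('@arangodb/crypto');

const password = 'correct horse battery staple';
const salt = crypto.genRandomSalt(16);     // 16 random printable ASCII characters
const iterations = 64000;                  // in line with the OWASP guidance quoted above
const keyLength = 32;

// hash to store alongside the salt
const stored = crypto.pbkdf2(salt, password, iterations, keyLength);

// later, when verifying a login attempt:
const attempt = crypto.pbkdf2(salt, 'some guess', iterations, keyLength);
const ok = crypto.constantEquals(stored, attempt);   // false for a wrong password
```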
diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/FileSystem.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/FileSystem.md deleted file mode 100644 index b90d2e7de870..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/FileSystem.md +++ /dev/null @@ -1,344 +0,0 @@ -Filesystem Module -================= - -`const fs = require('fs')` - -The implementation tries to follow the CommonJS -[Filesystem/A/0](http://wiki.commonjs.org/wiki/Filesystem/A/0) -specification where possible. - -Working Directory ------------------ -The directory functions below shouldn't use the current working directory of the server like `.` or `./test`. -You will not be able to tell whether the environment the server is running in will permit directory listing, -reading or writing of files. - -You should either base your directories with `getTempPath()`, or as a Foxx service use the -[module.context.basePath](../../Foxx/Reference/Context.md). - -Single File Directory Manipulation ----------------------------------- - -### exists - - -checks if a file of any type or directory exists -`fs.exists(path)` - -Returns true if a file (of any type) or a directory exists at a given -path. If the file is a broken symbolic link, returns false. - - -### isFile - - -tests if path is a file -`fs.isFile(path)` - -Returns true if the *path* points to a file. - - -### isDirectory - - -tests if path is a directory -`fs.isDirectory(path)` - -Returns true if the *path* points to a directory. - - -### size - - -gets the size of a file -`fs.size(path)` - -Returns the size of the file specified by *path*. - - -### mtime - - -gets the last modification time of a file -`fs.mtime(filename)` - -Returns the last modification date of the specified file. The date is -returned as a Unix timestamp (number of seconds elapsed since January 1 1970). - - -### pathSeparator -`fs.pathSeparator` - -If you want to combine two paths you can use fs.pathSeparator instead of */* or *\\*. - -### join -`fs.join(path, filename)` - -The function returns the combination of the path and filename, e.g. fs.join(Hello/World, foo.bar) would return Hello/World/foo.bar. - -### getTempFile - - -returns the name for a (new) temporary file -`fs.getTempFile(directory, createFile)` - -Returns the name for a new temporary file in directory *directory*. -If *createFile* is *true*, an empty file will be created so no other -process can create a file of the same name. - -**Note**: The directory *directory* must exist. - - -### getTempPath - - -returns the temporary directory -`fs.getTempPath()` - -Returns the absolute path of the temporary directory - - - -### makeAbsolute - - -makes a given path absolute -`fs.makeAbsolute(path)` - -Returns the given string if it is an absolute path, otherwise an -absolute path to the same location is returned. - - -### chmod - - -sets file permissions of specified files (non windows only) -`fs.chmod(path, mode)` - -where `mode` is a string with a leading zero matching the `OCTAL-MODE` as explained -in *nix `man chmod`. - -Returns true on success. - - -### list - - -returns the directory listing -`fs.list(path)` - -The functions returns the names of all the files in a directory, in -lexically sorted order. Throws an exception if the directory cannot be -traversed (or path is not a directory). - -**Note**: this means that list("x") of a directory containing "a" and "b" would -return ["a", "b"], not ["x/a", "x/b"]. 
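A small sketch using only the helpers described so far, staying inside the temporary directory as recommended above instead of the server's working directory:

```js
const fs = require('fs');

const tmp = fs.getTempPath();            // absolute path of the temporary directory
const file = fs.getTempFile(tmp, true);  // reserve an empty temporary file in it

fs.exists(file);                         // true
fs.isFile(file);                         // true
fs.size(file);                           // 0 for the freshly created file
fs.list(tmp);                            // file names relative to tmp, lexically sorted
```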
- - -### listTree - - -returns the directory tree -`fs.listTree(path)` - -The function returns an array that starts with the given path, and all of -the paths relative to the given path, discovered by a depth first traversal -of every directory in any visited directory, reporting but not traversing -symbolic links to directories. The first path is always *""*, the path -relative to itself. - - -### makeDirectory - - -creates a directory -`fs.makeDirectory(path)` - -Creates the directory specified by *path*. - - -### makeDirectoryRecursive - - -creates a directory -`fs.makeDirectoryRecursive(path)` - -Creates the directory hierarchy specified by *path*. - - -### remove - - -removes a file -`fs.remove(filename)` - -Removes the file *filename* at the given path. Throws an exception if the -path corresponds to anything that is not a file or a symbolic link. If -"path" refers to a symbolic link, removes the symbolic link. - - -### removeDirectory - - -removes an empty directory -`fs.removeDirectory(path)` - -Removes a directory if it is empty. Throws an exception if the path is not -an empty directory. - - - -### removeDirectoryRecursive - - -removes a directory -`fs.removeDirectoryRecursive(path)` - -Removes a directory with all subelements. Throws an exception if the path -is not a directory. - - -File IO -------- - -### read - - -reads in a file -`fs.read(filename)` - -Reads in a file and returns the content as string. Please note that the -file content must be encoded in UTF-8. - - -### read64 - - -reads in a file as base64 -`fs.read64(filename)` - -Reads in a file and returns the content as string. The file content is -Base64 encoded. - - -### readBuffer - - -reads in a file -`fs.readBuffer(filename)` - -Reads in a file and returns its content in a Buffer object. - - -### readFileSync -`fs.readFileSync(filename, encoding)` - -Reads the contents of the file specified in `filename`. If `encoding` is specified, -the file contents will be returned as a string. Supported encodings are: -- `utf8` or `utf-8` -- `ascii` -- `base64` -- `ucs2` or `ucs-2` -- `utf16le` or `utf16be` -- `hex` - -If no `encoding` is specified, the file contents will be returned in a Buffer -object. - -### write -`fs.write(filename, content)` - -Writes the content into a file. Content can be a string or a Buffer -object. If the file already exists, it is truncated. - -### writeFileSync -`fs.writeFileSync(filename, content)` - -This is an alias for `fs.write(filename, content)`. - -### append -`fs.append(filename, content)` - -Writes the content into a file. Content can be a string or a Buffer -object. If the file already exists, the content is appended at the -end. - -Recursive Manipulation ----------------------- - -### copyRecursive - - -copies a directory structure -`fs.copyRecursive(source, destination)` - -Copies *source* to *destination*. -Exceptions will be thrown on: - - Failure to copy the file - - specifying a directory for destination when source is a file - - specifying a directory as source and destination - - -### CopyFile - - -copies a file into a target file -`fs.copyFile(source, destination)` - -Copies *source* to destination. If Destination is a directory, a file -of the same name will be created in that directory, else the copy will get -the -specified filename. - - -### linkFile - -creates a symbolic link from a target in the place of linkpath. -`fs.linkFile(target, linkpath)` - -In `linkpath` a symbolic link to `target` will be created. 
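The following sketch combines the directory and file IO helpers described above; the directory and file names are made up for illustration:

```js
const fs = require('fs');

const dir = fs.join(fs.getTempPath(), 'fs-example');
fs.makeDirectoryRecursive(dir);           // create the directory hierarchy

const file = fs.join(dir, 'notes.txt');
fs.write(file, 'first line\n');           // creates (or truncates) the file
fs.append(file, 'second line\n');         // appends to the existing file
fs.readFileSync(file, 'utf-8');           // "first line\nsecond line\n"

fs.removeDirectoryRecursive(dir);         // remove the directory and everything in it
```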
- -### move - - -renames a file -`fs.move(source, destination)` - -Moves *source* to destination. Failure to move the file, or -specifying a directory for destination when source is a file will throw an -exception. Likewise, specifying a directory as source and destination will -fail. - - -ZIP ---- - -### unzipFile - - -unzips a file -`fs.unzipFile(filename, outpath, skipPaths, overwrite, password)` - -Unzips the zip file specified by *filename* into the path specified by -*outpath*. Overwrites any existing target files if *overwrite* is set -to *true*. - -Returns *true* if the file was unzipped successfully. - - -### zipFile - - -zips a file -`fs.zipFile(filename, chdir, files, password)` - -Stores the files specified by *files* in the zip file *filename*. If -the file *filename* already exists, an error is thrown. The list of input -files *files* must be given as a list of absolute filenames. If *chdir* is -not empty, the *chdir* prefix will be stripped from the filename in the -zip file, so when it is unzipped filenames will be relative. -Specifying a password is optional. - -Returns *true* if the file was zipped successfully. - - diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Queries.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Queries.md deleted file mode 100644 index 5244994e8301..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Queries.md +++ /dev/null @@ -1,91 +0,0 @@ -Queries Module -============== - -`const queries = require('@arangodb/aql/queries')` - -The query module provides the infrastructure for working with currently running AQL queries via arangosh. - -Properties ----------- - -`queries.properties()` Returns the servers current query tracking configuration; we change the slow query threshold to get better results: - - @startDocuBlockInline QUERY_01_properyOfQueries - @EXAMPLE_ARANGOSH_OUTPUT{QUERY_01_properyOfQueries} - var queries = require("@arangodb/aql/queries"); - queries.properties(); - queries.properties({slowQueryThreshold: 1}); - queries.properties({slowStreamingQueryThreshold: 1}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock QUERY_01_properyOfQueries - -Currently running queries -------------------------- - -We [create a task](Tasks.md) that spawns queries, so we have nice output. Since this task -uses resources, you may want to increase `period` (and not forget to remove it... afterwards): - - @startDocuBlockInline QUERY_02_listQueries - @EXAMPLE_ARANGOSH_OUTPUT{QUERY_02_listQueries} - ~var queries = require("@arangodb/aql/queries"); - var theQuery = 'FOR sleepLoooong IN 1..5 LET sleepLoooonger = SLEEP(1000) RETURN sleepLoooong'; - var tasks = require("@arangodb/tasks"); - |tasks.register({ - | id: "mytask-1", - | name: "this is a sample task to spawn a slow aql query", - | command: "require('@arangodb').db._query('" + theQuery + "');" - }); - |~ while (true) { - |~ require("internal").wait(1); - |~ if (queries.current().filter(function(query) { - |~ return query.query === theQuery; - |~ }).length > 0) { - |~ break; - |~ } - ~} - queries.current(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock QUERY_02_listQueries -The function returns the currently running AQL queries as an array. 
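The entries returned by `queries.current()` are plain objects and can be filtered like any JavaScript array, for example to single out long-running queries before killing them (see the *Kill* section below). In this sketch the `runTime` attribute name is an assumption about the query tracking output and is not taken from the text above; `id` and `query` are used as in the kill example further down:

```js
const queries = require('@arangodb/aql/queries');

// queries that have been running for more than ten seconds (assumed attribute)
const longRunning = queries.current().filter(function (q) {
  return q.runTime > 10;
});

longRunning.forEach(function (q) {
  console.log('slow query %s: %s', q.id, q.query);
});
```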
- -Slow queries ------------- - -The function returns the last AQL queries that exceeded the slow query threshold as an array: - - @startDocuBlockInline QUERY_03_listSlowQueries - @EXAMPLE_ARANGOSH_OUTPUT{QUERY_03_listSlowQueries} - ~var queries = require("@arangodb/aql/queries"); - queries.slow(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock QUERY_03_listSlowQueries - -Clear slow queries ------------------- - -Clear the list of slow AQL queries: - - @startDocuBlockInline QUERY_04_clearSlowQueries - @EXAMPLE_ARANGOSH_OUTPUT{QUERY_04_clearSlowQueries} - ~var queries = require("@arangodb/aql/queries"); - queries.clearSlow(); - queries.slow(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock QUERY_04_clearSlowQueries - -Kill ----- - -Kill a running AQL query: - - @startDocuBlockInline QUERY_05_killQueries - @EXAMPLE_ARANGOSH_OUTPUT{QUERY_05_killQueries} - ~var queries = require("@arangodb/aql/queries"); - ~var tasks = require("@arangodb/tasks"); - ~var theQuery = 'FOR sleepLoooong IN 1..5 LET sleepLoooonger = SLEEP(1000) RETURN sleepLoooong'; - |var runningQueries = queries.current().filter(function(query) { - | return query.query === theQuery; - }); - queries.kill(runningQueries[0].id); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock QUERY_05_killQueries diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/README.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/README.md deleted file mode 100644 index 3eac634117e5..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/README.md +++ /dev/null @@ -1,145 +0,0 @@ -JavaScript Modules -================== - -ArangoDB uses a Node.js compatible module system. You can use the function *require* in order to load a module or library. It returns the exported variables and functions of the module. - -The global variables `global`, `process`, `console`, `Buffer`, `__filename` and `__dirname` are available throughout ArangoDB and Foxx. - -Node compatibility modules --------------------------- - -ArangoDB supports a number of modules for compatibility with Node.js, including: - -* [assert](http://nodejs.org/api/assert.html) implements basic assertion and testing functions. - -* [buffer](http://nodejs.org/api/buffer.html) implements a binary data type for JavaScript. - -* [console](Console.md) is a well known logging facility to all the JavaScript developers. - ArangoDB implements most of the [Console API](http://wiki.commonjs.org/wiki/Console), - with the exceptions of *profile* and *count*. - -* [events](http://nodejs.org/api/events.html) implements an event emitter. - -* [fs](FileSystem.md) provides a file system API for the manipulation of paths, directories, files, links, and the construction of file streams. - ArangoDB implements most [Filesystem/A](http://wiki.commonjs.org/wiki/Filesystem/A) functions. - -* [module](http://nodejs.org/api/modules.html) provides direct access to the module system. - -* [path](http://nodejs.org/api/path.html) implements functions dealing with filenames and paths. - -* [punycode](http://nodejs.org/api/punycode.html) implements - conversion functions for [punycode](http://en.wikipedia.org/wiki/Punycode) encoding. - -* [querystring](http://nodejs.org/api/querystring.html) provides utilities for dealing with query strings. - -* [stream](http://nodejs.org/api/stream.html) provides a streaming interface. - -* [string_decoder](https://nodejs.org/api/string_decoder.html) implements logic for decoding buffers into strings. 
- -* [url](http://nodejs.org/api/url.html) provides utilities for URL resolution and parsing. - -* [util](http://nodejs.org/api/util.html) provides general utility functions like `format` and `inspect`. - -Additionally ArangoDB provides partial implementations for the following modules: - -* `net`: - only `isIP`, `isIPv4` and `isIPv6`. - -* `process`: - only `env` and `cwd`; - stubs for `argv`, `stdout.isTTY`, `stdout.write`, `nextTick`. - -* `timers`: - stubs for `setImmediate`, `setTimeout`, `setInterval`, `clearImmediate`, `clearTimeout`, `clearInterval` and `ref`. - -* `tty`: - only `isatty` (always returns `false`). - -* `vm`: - only `runInThisContext`. - -The following Node.js modules are not available at all: -`child_process`, -`cluster`, -`constants`, -`crypto` (but see `@arangodb/crypto` below), -`dgram`, -`dns`, -`domain`, -`http` (but see `@arangodb/request` below), -`https`, -`os`, -`sys`, -`tls`, -`v8`, -`zlib`. - -ArangoDB Specific Modules -------------------------- - -There are a large number of ArangoDB-specific modules using the `@arangodb` namespace, mostly for internal use by ArangoDB itself. The following however are noteworthy: - -* [@arangodb](ArangoDB.md) provides direct access to the database and its collections. - -* [@arangodb/crypto](Crypto.md) provides various cryptography functions including hashing algorithms. - -* [@arangodb/request](Request.md) provides the functionality for making synchronous HTTP/HTTPS requests. - -* [@arangodb/foxx](../../Foxx/README.md) is the namespace providing the various building blocks of the Foxx microservice framework. - -Bundled NPM Modules -------------------- - -The following [NPM modules](https://www.npmjs.com) are preinstalled: - -* [aqb](https://github.com/arangodb/aqbjs) - is the ArangoDB Query Builder and can be used to construct AQL queries with a chaining JavaScript API. - -* [chai](http://chaijs.com) - is a full-featured assertion library for writing JavaScript tests. - -* [dedent](https://github.com/dmnd/dedent) - is a simple utility function for formatting multi-line strings. - -* [error-stack-parser](http://www.stacktracejs.com) - parses stacktraces into a more useful format. - - - - - -* [graphql-sync](https://github.com/arangodb/graphql-sync) - is an ArangoDB-compatible GraphQL server/schema implementation. - -* [highlight.js](https://highlightjs.org) - is an HTML syntax highlighter. - -* [i (inflect)](https://github.com/pksunkara/inflect) - is a utility library for inflecting (e.g. pluralizing) words. - -* [iconv-lite](https://github.com/ashtuchkin/iconv-lite) - is a utility library for converting between character encodings - -* [joi](https://github.com/hapijs/joi) - is a validation library that is supported throughout the Foxx framework. - -* [js-yaml](https://github.com/nodeca/js-yaml) - is a JavaScript implementation of the YAML data format (a partial superset of JSON). - -* [lodash](https://lodash.com) - is a utility belt for JavaScript providing various useful helper functions. - -* [minimatch](https://github.com/isaacs/minimatch) - is a glob matcher for matching wildcards in file paths. - -* [qs](https://github.com/hapijs/qs) - provides utilities for dealing with query strings using a different format than the **querystring** module. - -* [semver](https://github.com/npm/node-semver) - is a utility library for handling semver version numbers. - -* [sinon](http://sinonjs.org) - is a mocking library for writing test stubs, mocks and spies. 
- -* [timezone](https://github.com/bigeasy/timezone) - is a library for converting date time values between formats and timezones. diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Request.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Request.md deleted file mode 100644 index 7d97aa9b049e..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Request.md +++ /dev/null @@ -1,181 +0,0 @@ -Request Module -============== - -`const request = require('@arangodb/request')` - -The request module provides the functionality for making HTTP requests. - -Making HTTP requests --------------------- - -### HTTP method helpers - -In addition to the *request* function convenience shorthands are available for each HTTP method in the form of, i.e.: - -* `request.head(url, options)` -* `request.get(url, options)` -* `request.post(url, options)` -* `request.put(url, options)` -* `request.delete(url, options)` -* `request.patch(url, options)` - -These are equivalent to using the *request* function directly, i.e.: - -```js -request[method](url, options) -// is equivalent to -request({method, url, ...options}); -``` - -For example: - -```js -const request = require('@arangodb/request'); - -request.get('http://localhost', {headers: {'x-session-id': 'keyboardcat'}}); -// is equivalent to -request({ - method: 'get', - url: 'http://localhost', - headers: {'x-session-id': 'keyboardcat'} -}); -``` - -### The request function - -The request function can be used to make HTTP requests. - -`request(options)` - -Performs an HTTP request and returns a *Response* object. - -*Parameter* - -The request function takes the following options: - -* *url* or *uri*: the fully-qualified URL or a parsed URL from `url.parse`. -* *qs* (optional): object containing querystring values to be appended to the URL. -* *useQuerystring*: if `true`, use `querystring` module to handle querystrings, otherwise use `qs` module. Default: `false`. -* *method* (optional): HTTP method (case-insensitive). Default: `"GET"`. -* *headers* (optional): HTTP headers (case-insensitive). Default: `{}`. -* *body* (optional): request body. Must be a string or `Buffer`, or a JSON serializable value if *json* is `true`. -* *json*: if `true`, *body* will be serialized to a JSON string and the *Content-Type* header will be set to `"application/json"`. Additionally the response body will also be parsed as JSON (unless *encoding* is set to `null`). Default: `false`. -* *form* (optional): when set to a string or object and no *body* has been set, *body* will be set to a querystring representation of that value and the *Content-Type* header will be set to `"application/x-www-form-urlencoded"`. Also see *useQuerystring*. -* *auth* (optional): an object with the properties *username* and *password* for HTTP Basic authentication or the property *bearer* for HTTP Bearer token authentication. -* *sslProtocol* (optional): which tls version should be used to connect to the url. The default is `4` which is TLS 1.0. See [SSL protocol](../../Programs/Arangod/Ssl.md#ssl-protocol) for more opitions. -* *followRedirect*: whether HTTP 3xx redirects should be followed. Default: `true`. -* *maxRedirects*: the maximum number of redirects to follow. Default: `10`. -* *encoding*: encoding to be used for the response body. If set to `null`, the response body will be returned as a `Buffer`. Default: `"utf-8"`. -* *timeout*: number of seconds to wait for a response before aborting the request. 
-* *returnBodyOnError*: whether the response body should be returned even when the server response indicates an error. Default: `true`. - -The function returns a *Response* object with the following properties: - -* *rawBody*: the raw response body as a `Buffer`. -* *body*: the parsed response body. If *encoding* was not set to `null`, this is a string. If additionally *json* was set to `true` and the response body is well-formed JSON, this is the parsed JSON data. -* *headers*: an object containing the response headers. Otherwise this is identical to *rawBody*. -* *statusCode* and *status*: the HTTP status code of the response, e.g. `404`. -* *message*: the HTTP status message of the response, e.g. `Not Found`. - -#### Forms - -The request module supports `application/x-www-form-urlencoded` (URL encoded) form uploads: - -```js -const request = require('@arangodb/request'); - -var res = request.post('http://service.example/upload', {form: {key: 'value'}}); -// or -var res = request.post({url: 'http://service.example/upload', form: {key: 'value'}}); -// or -var res = request({ - method: 'post', - url: 'http://service.example/upload', - form: {key: 'value'} -}); -``` - -Form data will be encoded using the [qs](https://www.npmjs.com/package/qs) module by default. - -If you want to use the [querystring](http://nodejs.org/api/querystring.html) module instead, simply use the *useQuerystring* option. - -#### JSON - -If you want to submit JSON-serializable values as request bodies, just set the *json* option: - -```js -const request = require('@arangodb/request'); - -var res = request.post('http://service.example/notify', {body: {key: 'value'}, json: true}); -// or -var res = request.post({url: 'http://service.example/notify', body: {key: 'value'}, json: true}); -// or -var res = request({ - method: 'post', - url: 'http://service.example/notify', - body: {key: 'value'}, - json: true -}); -``` - -#### HTTP authentication - -The request module supports both *HTTP Basic* authentication. 
Just pass the credentials via the *auth* option: - -```js -const request = require('@arangodb/request'); - -var res = request.get( - 'http://service.example/secret', - {auth: {username: 'jcd', password: 'bionicman'}} -); -// or -var res = request.get({ - url: 'http://service.example/secret', - auth: {username: 'jcd', password: 'bionicman'} -}); -// or -var res = request({ - method: 'get', - url: 'http://service.example/secret', - auth: {username: 'jcd', password: 'bionicman'} -}); -``` - -Alternatively you can supply the credentials via the URL: - -```js -const request = require('@arangodb/request'); - -var username = 'jcd'; -var password = 'bionicman'; -var res = request.get( - 'http://' + - encodeURIComponent(username) + - ':' + - encodeURIComponent(password) + - '@service.example/secret' -); -``` - -You can also use *Bearer* token authentication: - -```js -const request = require('@arangodb/request'); - -var res = request.get( - 'http://service.example/secret', - {auth: {bearer: 'keyboardcat'}} -); -// or -var res = request.get({ - url: 'http://service.example/secret', - auth: {bearer: 'keyboardcat'} -}); -// or -var res = request({ - method: 'get', - url: 'http://service.example/secret', - auth: {bearer: 'keyboardcat'} -}); -``` diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/Tasks.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/Tasks.md deleted file mode 100644 index c9c709f9578d..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/Tasks.md +++ /dev/null @@ -1,175 +0,0 @@ -Task Management -=============== - -`const tasks = require('@arangodb/tasks')` - -**Note**: If you are trying to schedule tasks in Foxx you should -consider using the [Foxx queues module](../../Foxx/Guides/Scripts.md#queues) instead, -which provides a more high-level API that also persists tasks across reboots. - -## Introduction to Task Management in ArangoDB - -ArangoDB can execute user-defined JavaScript functions as one-shot -or periodic tasks. This functionality can be used to implement timed -or recurring jobs in the database. - -Tasks in ArangoDB consist of a JavaScript snippet or function that is -executed when the task is scheduled. A task can be a one-shot task -(meaning it is run once and not repeated) or a periodic task (meaning -that it is re-scheduled after each execution). Tasks can have optional -parameters, which are defined at task setup time. The parameters -specified at task setup time will be passed as arguments to the -task whenever it gets executed. Periodic Tasks have an execution -frequency that needs to be specified when the task is set up. One-shot -tasks have a configurable delay after which they'll get executed. - -Tasks will be executed on the server they have been set up on. -Tasks will not be shipped around in a cluster. A task will be -executed in the context of the database it was created in. However, -when dropping a database, any tasks that were created in the context -of this database will remain active. It is therefore sensible to -first unregister all active tasks for a database before dropping the -database. - -Tasks registered in ArangoDB will be executed until the server -gets shut down or restarted. After a restart of the server, any -user-defined one-shot or periodic tasks will be lost. - -## Commands for Working with Tasks - -ArangoDB provides the following commands for working with tasks. 
-All commands can be accessed via the *tasks* module, which can be -loaded like this: - -`require("@arangodb/tasks")` - -Please note that the *tasks* module is available inside the ArangoDB -server only. It cannot be used from the ArangoShell or ArangoDB's web -interface. - -## Register a task - -To register a task, the JavaScript snippet or function needs to be -specified in addition to the execution frequency. Optionally, a task -can have an id and a name. If no id is specified, it will be auto-assigned -for a new task. The task id is also the means to access or unregister a -task later. Task names are informational only. They can be used to make -a task distinguishable from other tasks also running on the server. - -The following server-side commands register a task. The command to be -executed is a JavaScript string snippet which prints a message to the -server's logfile: - - -```js -const tasks = require("@arangodb/tasks"); - -tasks.register({ - id: "mytask-1", - name: "this is a snippet task", - period: 15, - command: "require('console').log('hello from snippet task');" -}); -``` - - -The above has register a task with id *mytask-1*, which will be executed -every 15 seconds on the server. The task will write a log message whenever -it is invoked. - -Tasks can also be set up using a JavaScript callback function like this: - -```js -const tasks = require("@arangodb/tasks"); - -tasks.register({ - id: "mytask-2", - name: "this is a function task", - period: 15, - command: function () { - require('console').log('hello from function task'); - } -}); -``` - -It is important to note that the callback function is late bound and -will be executed in a different context than in the creation context. -The callback function must therefore not access any variables defined -outside of its own scope. The callback function can still define and -use its own variables. - -To pass parameters to a task, the *params* attribute can be set when -registering a task. Note that the parameters are limited to data types -usable in JSON (meaning no callback functions can be passed as parameters -into a task): - -```js -const tasks = require("@arangodb/tasks"); - -tasks.register({ - id: "mytask-3", - name: "this is a parameter task", - period: 15, - command: function (params) { - var greeting = params.greeting; - var data = JSON.stringify(params.data); - require('console').log('%s from parameter task: %s', greeting, data); - }, - params: { greeting: "hi", data: "how are you?" } -}); -``` - -Registering a one-shot task works the same way, except that the -*period* attribute must be omitted. If *period* is omitted, then the -task will be executed just once. The task invocation delay can optionally -be specified with the *offset* attribute: - -```js -const tasks = require("@arangodb/tasks"); - -tasks.register({ - id: "mytask-once", - name: "this is a one-shot task", - offset: 10, - command: function (params) { - require('console').log('you will see me just once!'); - } -}); -``` - -**Note**: When specifying an *offset* value of 0, ArangoDB will internally add -a very small value to the offset so will be slightly greater than zero. - -## Unregister a task - -After a task has been registered, it can be unregistered using its id: - -```js -const tasks = require("@arangodb/tasks"); -tasks.unregister("mytask-1"); -``` - -Note that unregistering a non-existing task will throw an exception. - - -## List all tasks - -To get an overview of which tasks are registered, there is the *get* -method. 
If the *get* method is called without any arguments, it will -return an array of all tasks: - -```js -const tasks = require("@arangodb/tasks"); -tasks.get(); -``` - -If *get* is called with a task id argument, it will return information -about this particular task: - -```js -const tasks = require("@arangodb/tasks"); -tasks.get("mytask-3"); -``` - -The *created* attribute of a task reveals when a task was created. It is -returned as a Unix timestamp. diff --git a/Documentation/Books/Manual/Appendix/JavaScriptModules/WAL.md b/Documentation/Books/Manual/Appendix/JavaScriptModules/WAL.md deleted file mode 100644 index d7aad8ee7e90..000000000000 --- a/Documentation/Books/Manual/Appendix/JavaScriptModules/WAL.md +++ /dev/null @@ -1,115 +0,0 @@ -Write-ahead log -=============== - -`const wal = require('internal').wal` - -This module provides functionality for administering the write-ahead logs. -Most of these functions only return sensible values when invoked with the -[MMFiles engine being active](../../Programs/Arangod/Server.md#storage-engine). - -Configuration -------------- - - - - -retrieves the configuration of the write-ahead log -`internal.wal.properties()` - -Retrieves the configuration of the write-ahead log. The result is a JSON -array with the following attributes: -- *allowOversizeEntries*: whether or not operations that are bigger than a - single logfile can be executed and stored -- *logfileSize*: the size of each write-ahead logfile -- *historicLogfiles*: the maximum number of historic logfiles to keep -- *reserveLogfiles*: the maximum number of reserve logfiles that ArangoDB - allocates in the background -- *syncInterval*: the interval for automatic synchronization of not-yet - synchronized write-ahead log data (in milliseconds) -- *throttleWait*: the maximum wait time that operations will wait before - they get aborted if case of write-throttling (in milliseconds) -- *throttleWhenPending*: the number of unprocessed garbage-collection - operations that, when reached, will activate write-throttling. A value of - *0* means that write-throttling will not be triggered. - - -**Examples** - - - @startDocuBlockInline WalPropertiesGet - @EXAMPLE_ARANGOSH_OUTPUT{WalPropertiesGet} - require("internal").wal.properties(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock WalPropertiesGet - - - - - -configures the write-ahead log -`internal.wal.properties(properties)` - -Configures the behavior of the write-ahead log. *properties* must be a JSON -JSON object with the following attributes: -- *allowOversizeEntries*: whether or not operations that are bigger than a - single logfile can be executed and stored -- *logfileSize*: the size of each write-ahead logfile -- *historicLogfiles*: the maximum number of historic logfiles to keep -- *reserveLogfiles*: the maximum number of reserve logfiles that ArangoDB - allocates in the background -- *throttleWait*: the maximum wait time that operations will wait before - they get aborted if case of write-throttling (in milliseconds) -- *throttleWhenPending*: the number of unprocessed garbage-collection - operations that, when reached, will activate write-throttling. A value of - *0* means that write-throttling will not be triggered. - -Specifying any of the above attributes is optional. Not specified attributes -will be ignored and the configuration for them will not be modified. 
- - -**Examples** - - - @startDocuBlockInline WalPropertiesSet - @EXAMPLE_ARANGOSH_OUTPUT{WalPropertiesSet} - | require("internal").wal.properties({ - | allowOverSizeEntries: true, - logfileSize: 32 * 1024 * 1024 }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock WalPropertiesSet - - -Flushing --------- - - - - -flushes the currently open WAL logfile -`internal.wal.flush(waitForSync, waitForCollector)` - -Flushes the write-ahead log. By flushing the currently active write-ahead -logfile, the data in it can be transferred to collection journals and -datafiles. This is useful to ensure that all data for a collection is -present in the collection journals and datafiles, for example, when dumping -the data of a collection. - -The *waitForSync* option determines whether or not the operation should -block until the not-yet synchronized data in the write-ahead log was -synchronized to disk. - -The *waitForCollector* operation can be used to specify that the operation -should block until the data in the flushed log has been collected by the -write-ahead log garbage collector. Note that setting this option to *true* -might block for a long time if there are long-running transactions and -the write-ahead log garbage collector cannot finish garbage collection. - - -**Examples** - - - @startDocuBlockInline WalFlush - @EXAMPLE_ARANGOSH_OUTPUT{WalFlush} - require("internal").wal.flush(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock WalFlush diff --git a/Documentation/Books/Manual/Appendix/README.md b/Documentation/Books/Manual/Appendix/README.md deleted file mode 100644 index 79d60840ddd5..000000000000 --- a/Documentation/Books/Manual/Appendix/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Appendix -======== - -- [References](References/README.md): Brief overviews over interfaces and objects - - [db](References/DBObject.md): the `db` object - - [collection](References/CollectionObject.md): the `collection` object - - [cursor](References/CursorObject.md): the `cursor` object -- [JavaScript Modules](JavaScriptModules/README.md): List of built-in and supported JS modules -- [Deprecated](Deprecated/README.md): Features that are considered obsolete and may get removed eventually -- [Error codes and meanings](ErrorCodes.md): List of all possible errors that can be encountered -- [Glossary](Glossary.md): Disambiguation page diff --git a/Documentation/Books/Manual/Appendix/References/CollectionObject.md b/Documentation/Books/Manual/Appendix/References/CollectionObject.md deleted file mode 100644 index ef84996fb86f..000000000000 --- a/Documentation/Books/Manual/Appendix/References/CollectionObject.md +++ /dev/null @@ -1,55 +0,0 @@ -The "collection" Object -======================= - -The following methods exist on the collection object (returned by *db.name*): - -*Collection* - -* [collection.checksum()](../../DataModeling/Collections/CollectionMethods.md#checksum) -* [collection.compact()](../../DataModeling/Collections/CollectionMethods.md#compact) -* [collection.count()](../../DataModeling/Documents/DocumentMethods.md#count) -* [collection.drop()](../../DataModeling/Collections/CollectionMethods.md#drop) -* [collection.figures()](../../DataModeling/Collections/CollectionMethods.md#figures) -* [collection.getResponsibleShard()](../../DataModeling/Collections/CollectionMethods.md#getresponsibleshard) -* [collection.load()](../../DataModeling/Collections/CollectionMethods.md#load) -* [collection.properties(options)](../../DataModeling/Collections/CollectionMethods.md#properties) -* 
[collection.revision()](../../DataModeling/Collections/CollectionMethods.md#revision) -* [collection.rotate()](../../DataModeling/Collections/CollectionMethods.md#rotate) -* [collection.toArray()](../../DataModeling/Documents/DocumentMethods.md#toarray) -* [collection.truncate()](../../DataModeling/Collections/CollectionMethods.md#truncate) -* [collection.type()](../../DataModeling/Documents/DocumentMethods.md#collection-type) -* [collection.unload()](../../DataModeling/Collections/CollectionMethods.md#unload) - -*Indexes* - -* [collection.dropIndex(index)](../../Indexing/WorkingWithIndexes.md#dropping-an-index-via-a-collection-handle) -* [collection.ensureIndex(description)](../../Indexing/WorkingWithIndexes.md#creating-an-index) -* [collection.getIndexes(name)](../../Indexing/WorkingWithIndexes.md#listing-all-indexes-of-a-collection) -* [collection.index(index)](../../Indexing/WorkingWithIndexes.md#index-identifiers-and-handles) - -*Document* - -* [collection.all()](../../DataModeling/Documents/DocumentMethods.md#all) -* [collection.any()](../../DataModeling/Documents/DocumentMethods.md#any) -* [collection.byExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example) -* [collection.closedRange(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#closed-range) -* [collection.document(object)](../../DataModeling/Documents/DocumentMethods.md#document) -* [collection.documents(keys)](../../DataModeling/Documents/DocumentMethods.md#lookup-by-keys) -* [collection.edges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges) -* [collection.exists(object)](../../DataModeling/Documents/DocumentMethods.md#exists) -* [collection.firstExample(example)](../../DataModeling/Documents/DocumentMethods.md#first-example) -* [collection.inEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges) -* [collection.insert(data)](../../DataModeling/Documents/DocumentMethods.md#insert--save) -* [collection.edges(vertices)](../../DataModeling/Documents/DocumentMethods.md#edges) -* [collection.iterate(iterator,options)](../../DataModeling/Documents/DocumentMethods.md#misc) -* [collection.outEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges) -* [collection.range(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#range) -* [collection.remove(selector)](../../DataModeling/Documents/DocumentMethods.md#remove) -* [collection.removeByExample(example)](../../DataModeling/Documents/DocumentMethods.md#remove-by-example) -* [collection.removeByKeys(keys)](../../DataModeling/Documents/DocumentMethods.md#remove-by-keys) -* [collection.rename()](../../DataModeling/Collections/CollectionMethods.md#rename) -* [collection.replace(selector, data)](../../DataModeling/Documents/DocumentMethods.md#replace) -* [collection.replaceByExample(example, data)](../../DataModeling/Documents/DocumentMethods.md#replace-by-example) -* [collection.save(data)](../../DataModeling/Documents/DocumentMethods.md#insert--save) -* [collection.update(selector, data)](../../DataModeling/Documents/DocumentMethods.md#update) -* [collection.updateByExample(example, data)](../../DataModeling/Documents/DocumentMethods.md#update-by-example) diff --git a/Documentation/Books/Manual/Appendix/References/CursorObject.md b/Documentation/Books/Manual/Appendix/References/CursorObject.md deleted file mode 100644 index 40c43a25fb50..000000000000 --- a/Documentation/Books/Manual/Appendix/References/CursorObject.md +++ /dev/null @@ -1,201 +0,0 @@ -Sequential 
Access and Cursors -============================= - -If a query returns a cursor (for example by calling `db._query(...)`), then you can use *hasNext* and *next* to -iterate over the result set or *toArray* to convert it to an array. - -If the number of query results is expected to be big, it is possible to -limit the amount of documents transferred between the server and the client -to a specific value. This value is called *batchSize*. The *batchSize* -can optionally be set before or when a simple query is executed. -If the server has more documents than should be returned in a single batch, -the server will set the *hasMore* attribute in the result. It will also -return the id of the server-side cursor in the *id* attribute in the result. -This id can be used with the cursor API to fetch any outstanding results from -the server and dispose the server-side cursor afterwards. - -The initial *batchSize* value can be set using the *setBatchSize* -method that is available for each type of simple query, or when the simple -query is executed using its *execute* method. If no *batchSize* value -is specified, the server will pick a reasonable default value. - -Has Next --------- - - - - -checks if the cursor is exhausted -`cursor.hasNext()` - -The *hasNext* operator returns *true*, then the cursor still has -documents. In this case the next document can be accessed using the -*next* operator, which will advance the cursor. - - -**Examples** - - - @startDocuBlockInline cursorHasNext - @EXAMPLE_ARANGOSH_OUTPUT{cursorHasNext} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - var a = db._query("FOR x IN five RETURN x"); - while (a.hasNext()) print(a.next()); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock cursorHasNext - - -Next ----- - - - - -returns the next result document -`cursor.next()` - -If the *hasNext* operator returns *true*, then the underlying -cursor of the simple query still has documents. In this case the -next document can be accessed using the *next* operator, which -will advance the underlying cursor. If you use *next* on an -exhausted cursor, then *undefined* is returned. - - -**Examples** - - - @startDocuBlockInline cursorNext - @EXAMPLE_ARANGOSH_OUTPUT{cursorNext} - ~ db._create("five"); - ~ db.five.save({ name : "one" }); - ~ db.five.save({ name : "two" }); - ~ db.five.save({ name : "three" }); - ~ db.five.save({ name : "four" }); - ~ db.five.save({ name : "five" }); - db._query("FOR x IN five RETURN x").next(); - ~ db._drop("five") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock cursorNext - - -Set Batch size --------------- - - - - -sets the batch size for any following requests -`cursor.setBatchSize(number)` - -Sets the batch size for queries. The batch size determines how many results -are at most transferred from the server to the client in one chunk. - - -Get Batch size --------------- - - - - -returns the batch size -`cursor.getBatchSize()` - -Returns the batch size for queries. If the returned value is undefined, the -server will determine a sensible batch size for any following requests. - - -Execute Query -------------- - - - - -executes a query -`query.execute(batchSize)` - -Executes a simple query. If the optional batchSize value is specified, -the server will return at most batchSize values in one roundtrip. -The batchSize cannot be adjusted after the query is first executed. 
**Note**: There is no need to explicitly call the execute method if another means of fetching the query results is chosen. The following two approaches lead to the same result:

    @startDocuBlockInline executeQueryNoBatchSize
    @EXAMPLE_ARANGOSH_OUTPUT{executeQueryNoBatchSize}
    ~ db._create("users");
    ~ db.users.save({ name: "Gerhard" });
    ~ db.users.save({ name: "Helmut" });
    ~ db.users.save({ name: "Angela" });
      result = db.users.all().toArray();
    | var q = db._query("FOR x IN users RETURN x");
    | result = [ ];
    | while (q.hasNext()) {
    |   result.push(q.next());
      }
    ~ db._drop("users")
    @END_EXAMPLE_ARANGOSH_OUTPUT
    @endDocuBlock executeQueryNoBatchSize

The following two alternatives both use a batchSize and return the same result:

    @startDocuBlockInline executeQueryBatchSize
    @EXAMPLE_ARANGOSH_OUTPUT{executeQueryBatchSize}
    ~ db._create("users");
    ~ db.users.save({ name: "Gerhard" });
    ~ db.users.save({ name: "Helmut" });
    ~ db.users.save({ name: "Angela" });
      q = db.users.all(); q.setBatchSize(20); q.execute(); while (q.hasNext()) { print(q.next()); }
      q = db.users.all(); q.execute(20); while (q.hasNext()) { print(q.next()); }
    ~ db._drop("users")
    @END_EXAMPLE_ARANGOSH_OUTPUT
    @endDocuBlock executeQueryBatchSize

Dispose
-------

disposes the result
`cursor.dispose()`

If you are no longer interested in any further results, you should call *dispose* in order to free any resources associated with the cursor. After calling *dispose* you can no longer access the cursor.

Count
-----

counts the number of documents
`cursor.count()`

The *count* operator counts the number of documents in the result set and returns that number. The *count* operator ignores any limits and returns the total number of documents found.

**Note**: Not all simple queries support counting. In this case *null* is returned (Simple queries are deprecated).

`cursor.count(true)`

If the result set was limited by the *limit* operator or documents were skipped using the *skip* operator, the *count* operator with argument *true* will use the number of elements in the final result set - after applying *limit* and *skip*.

**Note**: Not all simple queries support counting. In this case *null* is returned (Simple queries are deprecated).

diff --git a/Documentation/Books/Manual/Appendix/References/DBObject.md b/Documentation/Books/Manual/Appendix/References/DBObject.md deleted file mode 100644 index 4f5eb87d42f0..000000000000 --- a/Documentation/Books/Manual/Appendix/References/DBObject.md +++ /dev/null @@ -1,65 +0,0 @@

The "db" Object
===============

The `db` object is available in [arangosh](../../Programs/Arangosh/README.md) by default, and can also be imported and used in Foxx services.

*db.name* returns a [collection object](CollectionObject.md) for the collection *name*.
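As a quick orientation, here is a minimal arangosh sketch that combines a few of the methods listed below (the collection name `example` is only an illustration):

```js
// arangosh: create a collection, insert a document, query it via AQL,
// then drop the collection again
db._create("example");                                   // db._create(name)
db.example.insert({ _key: "doc1", value: 42 });          // collection.insert(data)
db._query("FOR d IN example RETURN d.value").toArray();  // db._query(query)
db._drop("example");                                     // db._drop(name)
```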
- -The following methods exists on the *_db* object: - -*Database* - -* [db._createDatabase(name, options, users)](../../DataModeling/Databases/WorkingWith.md#create-database) -* [db._databases()](../../DataModeling/Databases/WorkingWith.md#list-databases) -* [db._dropDatabase(name, options, users)](../../DataModeling/Databases/WorkingWith.md#drop-database) -* [db._useDatabase(name)](../../DataModeling/Databases/WorkingWith.md#use-database) - -*Indexes* - -* [db._index(index)](../../Indexing/WorkingWithIndexes.md#fetching-an-index-by-handle) -* [db._dropIndex(index)](../../Indexing/WorkingWithIndexes.md#dropping-an-index-via-a-database-handle) - -*Properties* - -* [db._id()](../../DataModeling/Databases/WorkingWith.md#id) -* [db._isSystem()](../../DataModeling/Databases/WorkingWith.md#issystem) -* [db._name()](../../DataModeling/Databases/WorkingWith.md#name) -* [db._path()](../../DataModeling/Databases/WorkingWith.md#path) -* [db._version()](../../DataModeling/Documents/DocumentMethods.md#get-the-version-of-arangodb) - -*Collection* - -* [db._collection(name)](../../DataModeling/Collections/DatabaseMethods.md#collection) -* [db._collections()](../../DataModeling/Collections/DatabaseMethods.md#all-collections) -* [db._create(name)](../../DataModeling/Collections/DatabaseMethods.md#create) -* [db._drop(name)](../../DataModeling/Collections/DatabaseMethods.md#drop) -* [db._truncate(name)](../../DataModeling/Collections/DatabaseMethods.md#truncate) - -*AQL* - -* [db._createStatement(query)](../../../AQL/Invocation/WithArangosh.html#with-createstatement-arangostatement) -* [db._query(query)](../../../AQL/Invocation/WithArangosh.html#with-dbquery) -* [db._explain(query)](../../ReleaseNotes/NewFeatures28.md#miscellaneous-improvements) -* [db._parse(query)](../../../AQL/Invocation/WithArangosh.html#query-validation) - -*Document* - -* [db._document(object)](../../DataModeling/Documents/DatabaseMethods.md#document) -* [db._exists(object)](../../DataModeling/Documents/DatabaseMethods.md#exists) -* [db._remove(selector)](../../DataModeling/Documents/DatabaseMethods.md#remove) -* [db._replace(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#replace) -* [db._update(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#update) - -*Views* - -* [db._view(name)](../../DataModeling/Views/DatabaseMethods.md#view) -* [db._views()](../../DataModeling/Views/DatabaseMethods.md#all-views) -* [db._createView(name, type, properties)](../../DataModeling/Views/DatabaseMethods.md#create) -* [db._dropView(name)](../../DataModeling/Views/DatabaseMethods.md#drop) - -*Global* - -* [db._engine()](../../DataModeling/Databases/WorkingWith.md#engine) -* [db._engineStats()](../../DataModeling/Databases/WorkingWith.md#engine-statistics) -* [db._executeTransaction()](../../Transactions/TransactionInvocation.md) diff --git a/Documentation/Books/Manual/Appendix/References/README.md b/Documentation/Books/Manual/Appendix/References/README.md deleted file mode 100644 index a1178bf54309..000000000000 --- a/Documentation/Books/Manual/Appendix/References/README.md +++ /dev/null @@ -1,5 +0,0 @@ -References -========== - -This section contains the documentation for some of the API's which are common on the -arango shell, arangod server and for Foxx Apps. 
diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Architecture.md b/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Architecture.md deleted file mode 100644 index f2c298a6fecb..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Architecture.md +++ /dev/null @@ -1,102 +0,0 @@ -Active Failover Architecture -============================ - -An _Active Failover_ is defined as: - -- One ArangoDB Single-Server instance which is read / writable by clients called **Leader** -- One or more ArangoDB Single-Server instances, which are passive and not writable - called **Followers**, which asynchronously replicate data from the master -- At least one _Agency_ acting as a "witness" to determine which server becomes the _leader_ - in a _failure_ situation - -![ArangoDB Active Failover](leader-follower.png) - -The advantage of the _Active Failover_ compared to the traditional [_Master/Slave_](../MasterSlave/README.md) -setup is that there is an active third party, the _Agency_ which observes and supervises -all involved server processes. _Follower_ instances can rely on the _Agency_ to -determine the correct _Leader_ server. From an operational point of view, one advantage is that -the failover, in case the _Leader_ goes down, is automatic. An additional operational -advantage is that there is no need to start a _replication applier_ manually. - -The _Active Failover_ setup is made **resilient** by the fact that all the official -ArangoDB drivers can automatically determine the correct _leader_ server and -redirect requests appropriately. Furthermore Foxx Services do also automatically -perform a failover: should the _leader_ instance fail (which is also the _Foxxmaster_) -the newly elected _leader_ will reinstall all Foxx services and resume executing -queued [Foxx tasks](../../../Foxx/Guides/Scripts.md). -[Database users](../../../Administration/ManagingUsers/README.md) -which were created on the _leader_ will also be valid on the newly elected _leader_ -(always depending on the condition that they were synced already). - -Consider the case for two *arangod* instances. The two servers are connected via -server wide (global) asynchronous replication. One of the servers is -elected _Leader_, and the other one is made a _Follower_ automatically. At startup, -the two servers race for the leadership position. This happens through the _agency -locking mechanism_ (which means that the _Agency_ needs to be available at server start). -You can control which server will become _Leader_ by starting it earlier than -other server instances in the beginning. - -The _Follower_ will automatically start replication from the _Leader_ for all -available databases, using the server-level replication introduced in v. 3.3. - -When the _Leader_ goes down, this is automatically detected by the _Agency_ -instance, which is also started in this mode. This instance will make the -previous follower stop its replication and make it the new _Leader_. - -{% hint 'info' %} -The different instances participating in an Active Failover setup are supposed -to be run in the same _Data Center_ (DC), with a reliable high-speed network -connection between all the machines participating in the Active Failover setup. - -Multi-datacenter Active Failover setups are currently not supported. - -A multi-datacenter solution currently supported is the Datacenter to Datacenter replication -(DC2DC) among ArangoDB Clusters. 
See [DC2DC](../DC2DC/README.md) chapter for details. -{% endhint %} - -Operative Behaviour -------------------- - -In contrast to the normal behaviour of a single-server instance, the Active-Failover -mode will change the behaviour of ArangoDB in some situations. - -The _Follower_ will _always_ deny write requests from client applications. Starting from ArangoDB 3.4 -read requests are _only_ permitted if the requests is marked with the `X-Arango-Allow-Dirty-Read: true` header, -otherwise they are denied too. -Only the replication itself is allowed to access the follower's data until the -follower becomes a new _Leader_ (should a _failover_ happen). - -When sending a request to read or write data on a _Follower_, the _Follower_ will -respond with `HTTP 503 (Service unavailable)` and provide the address of -the current _Leader_. Client applications and drivers can use this information to -then make a follow-up request to the proper _Leader_: - -``` -HTTP/1.1 503 Service Unavailable -X-Arango-Endpoint: http://[::1]:8531 -.... -``` - -Client applications can also detect who the current _Leader_ and the _Followers_ -are by calling the `/_api/cluster/endpoints` REST API. This API is accessible -on _Leader_ and _Followers_ alike. - -Reading from Followers ----------------------- - -Followers in the active-failover setup are in read-only mode. It is possible to read from these -followers by adding a `X-Arango-Allow-Dirty-Read: true` header on each request. Responses will then automatically -contain the `X-Arango-Potential-Dirty-Read: true` header so that clients can reject accidental dirty reads. - -Depending on the driver support for your specific programming language, you should be able -to enable this option. - -Tooling Support ---------------- - -The tool _ArangoDB Starter_ supports starting two servers with asynchronous -replication and failover [out of the box](../../../Deployment/ActiveFailover/UsingTheStarter.md). - -The _arangojs_ driver for JavaScript, the Go driver, the Java driver, ArangoJS and -the PHP driver support active failover in case the currently accessed server endpoint -responds with `HTTP 503`. diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Limitations.md b/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Limitations.md deleted file mode 100644 index 28f743ca67c1..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/Limitations.md +++ /dev/null @@ -1,14 +0,0 @@ -Active Failover Limitations -=========================== - -The _Active Failover_ setup in ArangoDB has a few limitations. Some of these limitations -may be removed in later versions of ArangoDB: - -- Should you add more than one _follower_, be aware that during a _failover_ situation - the failover attempts to pick the most up to date follower as the new leader on a **best-effort** basis. -- In contrast to full ArangoDB Cluster (with synchronous replication), there is **no guarantee** on - how many database operations may have been lost during a failover. -- Should you be using the [ArangoDB Starter](../../../Programs/Starter/README.md) - or the [Kubernetes Operator](../../../Deployment/Kubernetes/README.md) to manage your Active-Failover - deployment, be aware that upgrading might trigger an unintentional failover between machines. 
- diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/README.md deleted file mode 100644 index bc34a392ab3e..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/README.md +++ /dev/null @@ -1,21 +0,0 @@ -Active Failover -=============== - -This _Chapter_ introduces ArangoDB's _Active Failover_ environment. - -**Sections:** - -- [Active Failover Architecture](Architecture.md) -- [Active Failover Limitations](Limitations.md) - -For further information about _Active Failover_ in ArangoDB, please refer to the following -sections included in other chapters of this Manual: - -- [Active Failover Deployment](../../../Deployment/ActiveFailover/README.md) -- [Active Failover Administration](../../../Administration/ActiveFailover/README.md) - -**Note:** _Asynchronous Failover_, _Resilient Single_, _Active-Passive_ or _Hot -Standby_ are other terms that have been used to define the _Active Failover_ environment. -Starting from version 3.3 _Active Failover_ is the preferred term to identify such -environment. - diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.png b/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.png deleted file mode 100644 index 4dd6cdf973a3..000000000000 Binary files a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.png and /dev/null differ diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.xcf b/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.xcf deleted file mode 100644 index a993837278d2..000000000000 Binary files a/Documentation/Books/Manual/Architecture/DeploymentModes/ActiveFailover/leader-follower.xcf and /dev/null differ diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Architecture.md b/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Architecture.md deleted file mode 100644 index 3b129d474dc6..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Architecture.md +++ /dev/null @@ -1,395 +0,0 @@ -Cluster Architecture -==================== - -The Cluster architecture of ArangoDB is a _CP_ master/master model with no -single point of failure. - -With "CP" in terms of the [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem) -we mean that in the presence of a -network partition, the database prefers internal consistency over -availability. With "master/master" we mean that clients can send their -requests to an arbitrary node, and experience the same view on the -database regardless. "No single point of failure" means that the cluster -can continue to serve requests, even if one machine fails completely. - -In this way, ArangoDB has been designed as a distributed multi-model -database. This section gives a short outline on the Cluster architecture and -how the above features and capabilities are achieved. - -Structure of an ArangoDB Cluster --------------------------------- - -An ArangoDB Cluster consists of a number of ArangoDB instances -which talk to each other over the network. They play different roles, -which will be explained in detail below. 
- -The current configuration -of the Cluster is held in the _Agency_, which is a highly-available -resilient key/value store based on an odd number of ArangoDB instances -running [Raft Consensus Protocol](https://raft.github.io/). - -For the various instances in an ArangoDB Cluster there are three distinct -roles: - -- _Agents_ -- _Coordinators_ -- _DBServers_. - -In the following sections we will shed light on each of them. - -![ArangoDB Cluster](cluster_topology.png) - -### Agents - -One or multiple _Agents_ form the _Agency_ in an ArangoDB Cluster. The -_Agency_ is the central place to store the configuration in a Cluster. It -performs leader elections and provides other synchronization services for -the whole Cluster. Without the _Agency_ none of the other components can -operate. - -While generally invisible to the outside the _Agency_ is the heart of the -Cluster. As such, fault tolerance is of course a must have for the -_Agency_. To achieve that the _Agents_ are using the [Raft Consensus -Algorithm](https://raft.github.io/). The algorithm formally guarantees -conflict free configuration management within the ArangoDB Cluster. - -At its core the _Agency_ manages a big configuration tree. It supports -transactional read and write operations on this tree, and other servers -can subscribe to HTTP callbacks for all changes to the tree. - -### Coordinators - -_Coordinators_ should be accessible from the outside. These are the ones -the clients talk to. They will coordinate cluster tasks like -executing queries and running Foxx services. They know where the -data is stored and will optimize where to run user supplied queries or -parts thereof. _Coordinators_ are stateless and can thus easily be shut down -and restarted as needed. - -### DBServers - -_DBservers_ are the ones where the data is actually hosted. They -host shards of data and using synchronous replication a _DBServer_ may -either be _leader_ or _follower_ for a shard. Document operations are first -applied on the _leader_ and then synchronously replicated to -all followers. - -Shards must not be accessed from the outside but indirectly through the -_Coordinators_. They may also execute queries in part or as a whole when -asked by a _Coordinator_. - -See [Sharding](#sharding) below for more information. - -Many sensible configurations ----------------------------- - -This architecture is very flexible and thus allows many configurations, -which are suitable for different usage scenarios: - - 1. The default configuration is to run exactly one _Coordinator_ and - one _DBServer_ on each machine. This achieves the classical - master/master setup, since there is a perfect symmetry between the - different nodes, clients can equally well talk to any one of the - _Coordinators_ and all expose the same view to the data store. _Agents_ - can run on separate, less powerful machines. - 2. One can deploy more _Coordinators_ than _DBservers_. This is a sensible - approach if one needs a lot of CPU power for the Foxx services, - because they run on the _Coordinators_. - 3. One can deploy more _DBServers_ than _Coordinators_ if more data capacity - is needed and the query performance is the lesser bottleneck - 4. One can deploy a _Coordinator_ on each machine where an application - server (e.g. a node.js server) runs, and the _Agents_ and _DBServers_ - on a separate set of machines elsewhere. This avoids a network hop - between the application server and the database and thus decreases - latency. 
Essentially, this moves some of the database distribution - logic to the machine where the client runs. - -As you can see, the _Coordinator_ layer can be scaled and deployed independently -from the _DBServer_ layer. - -{% hint 'warning' %} -It is a best practice and a recommended approach to run _Agent_ instances -on different machines than _DBServer_ instances. - -When deploying using the tool [_Starter_](../../../Deployment/ArangoDBStarter/README.md) -this can be achieved by using the options `--cluster.start-dbserver=false` and -`--cluster.start-coordinator=false` on the first three machines where the _Starter_ -is started, if the desired _Agency_ _size_ is 3, or on the first 5 machines -if the desired _Agency_ _size_ is 5. -{% endhint %} - -{% hint 'info' %} -The different instances that form a Cluster are supposed to be run in the same -_Data Center_ (DC), with reliable and high-speed network connection between -all the machines participating to the Cluster. - -Multi-datacenter Clusters, where the entire structure and content of a Cluster located -in a specific DC is replicated to others Clusters located in different DCs, are -possible as well. See [Datacenter to datacenter replication](../DC2DC/README.md) -(DC2DC) for further details. -{% endhint %} - -Cluster ID ----------- - -Every non-Agency ArangoDB instance in a Cluster is assigned a unique -ID during its startup. Using its ID a node is identifiable -throughout the Cluster. All cluster operations will communicate -via this ID. - -Sharding --------- - -Using the roles outlined above an ArangoDB Cluster is able to distribute -data in so called _shards_ across multiple _DBServers_. From the outside -this process is fully transparent and as such we achieve the goals of -what other systems call "master-master replication". - -In an ArangoDB Cluster you talk to any _Coordinator_ and whenever you read or write data -it will automatically figure out where the data is stored (read) or to -be stored (write). The information about the _shards_ is shared across the -_Coordinators_ using the _Agency_. - -ArangoDB organizes its collection data in _shards_. Sharding -allows to use multiple machines to run a cluster of ArangoDB -instances that together constitute a single database. This enables -you to store much more data, since ArangoDB distributes the data -automatically to the different servers. In many situations one can -also reap a benefit in data throughput, again because the load can -be distributed to multiple machines. - -_Shards_ are configured per _collection_ so multiple _shards_ of data form -the _collection_ as a whole. To determine in which _shard_ the data is to -be stored ArangoDB performs a hash across the values. By default this -hash is being created from the document __key_. - -For further information, please refer to the -[_Cluster Administration_ ](../../../Administration/Cluster/README.md#sharding) section. - -Synchronous replication ------------------------ - -In an ArangoDB Cluster, the replication among the data stored by the _DBServers_ -is synchronous. - -Synchronous replication works on a per-shard basis. Using the option _replicationFactor_, -one configures for each _collection_ how many copies of each _shard_ are kept in the Cluster. - -{% hint 'danger' %} -If a collection has a _replication factor_ of 1, its data is **not** -replicated to other _DBServers_. This exposes you to a risk of data loss, if -the machine running the _DBServer_ with the only copy of the data fails permanently. 
- -The _replication factor_ has to be set to a value equals or higher than 2 -to achieve minimal data redundancy via the synchronous replication. - -An equal-or-higher-than 2 _replication factor_ has to be set **explicitly** -when the collection is created, or can be set later at run time if you forgot -to set it at creation time. - -When using a Cluster, please make sure all the collections that are important -(and should not be lost in any case) have a _replication factor_ equal or higher -than 2. -{% endhint %} - -At any given time, one of the copies is declared to be the _leader_ and -all other replicas are _followers_. Internally, write operations for this _shard_ -are always sent to the _DBServer_ which happens to hold the _leader_ copy, -which in turn replicates the changes to all _followers_ before the operation -is considered to be done and reported back to the _Coordinator_. -Internally, read operations are all served by the _DBServer_ holding the _leader_ copy, -this allows to provide snapshot semantics for complex transactions. - -Using synchronous replication alone will guarantee consistency and high availability -at the cost of reduced performance: write requests will have a higher latency -(due to every write-request having to be executed on the _followers_) and -read requests will not scale out as only the _leader_ is being asked. - -In a Cluster, synchronous replication will be managed by the _Coordinators_ for the client. -The data will always be stored on the _DBServers_. - -The following example will give you an idea of how synchronous operation -has been implemented in ArangoDB Cluster: - -1. Connect to a _Coordinator_ via [_arangosh_](../../../Programs/Arangosh/README.md) -2. Create a collection - - 127.0.0.1:8530@_system> db._create("test", {"replicationFactor": 2}) - -3. The _Coordinator_ will figure out a *leader* and one *follower* and create - one *shard* (as this is the default) -4. Insert data - - 127.0.0.1:8530@_system> db.test.insert({"foo": "bar"}) - -5. The _Coordinator_ will write the data to the _leader_, which in turn will -replicate it to the _follower_. -6. Only when both were successful the result is reported to be successful: - - ```json - { - "_id" : "test/7987", - "_key" : "7987", - "_rev" : "7987" - } - ``` - -Obviously, synchronous replication comes at the cost of an increased latency for -write operations, simply because there is one more network hop within the -Cluster for every request. Therefore the user can set the _replicationFactor_ -to 1, which means that only one copy of each shard is kept, thereby -switching off synchronous replication. This is a suitable setting for -less important or easily recoverable data for which low latency write -operations matter. - -Automatic failover ------------------- - -### Failure of a follower - -If a _DBServer_ that holds a _follower_ copy of a _shard_ fails, then the _leader_ -can no longer synchronize its changes to that _follower_. After a short timeout -(3 seconds), the _leader_ gives up on the _follower_ and declares it to be -out of sync. - -One of the following two cases can happen: - -**a)** If another _DBServer_ (that does not hold a _replica_ for this _shard_ already) - is available in the Cluster, a new _follower_ will automatically - be created on this other _DBServer_ (so the _replication factor_ constraint is - satisfied again). 
- -**b)** If no other _DBServer_ (that does not hold a _replica_ for this _shard_ already) - is available, the service continues with one _follower_ less than the number - prescribed by the _replication factor_. - -If the old _DBServer_ with the _follower_ copy comes back, one of the following -two cases can happen: - -**a)** If previously we were in case a), the _DBServer_ recognizes that there is a new - _follower_ that was elected in the meantime, so it will no longer be a _follower_ - for that _shard_. - -**b)** If previously we were in case b), the _DBServer_ automatically resynchronizes its - data with the _leader_. The _replication factor_ constraint is now satisfied again - and order is restored. - -### Failure of a leader - -If a _DBServer_ that holds a _leader_ copy of a shard fails, then the _leader_ -can no longer serve any requests. It will no longer send a heartbeat to -the _Agency_. Therefore, a _supervision_ process running in the _Raft_ _leader_ -of the Agency, can take the necessary action (after 15 seconds of missing -heartbeats), namely to promote one of the _DBServers_ that hold in-sync -replicas of the _shard_ to _leader_ for that _shard_. This involves a -reconfiguration in the _Agency_ and leads to the fact that _Coordinators_ -now contact a different _DBServer_ for requests to this _shard_. Service -resumes. The other surviving _replicas_ automatically resynchronize their -data with the new _leader_. - -In addition to the above, one of the following two cases cases can happen: - -a) If another _DBServer_ (that does not hold a _replica_ for this _shard_ already) - is available in the Cluster, a new _follower_ will automatically - be created on this other _DBServer_ (so the _replication factor_ constraint is - satisfied again). -b) If no other _DBServer_ (that does not hold a _replica_ for this _shard_ already) - is available the service continues with one _follower_ less than the number - prescribed by the _replication factor_. - -When the _DBServer_ with the original _leader_ copy comes back, it recognizes -that a new _leader_ was elected in the meantime, and one of the following -two cases can happen: - -a) If previously we were in case a), since also a new _follower_ was created and - the _replication factor_ constraint is satisfied, the _DBServer_ will no - longer be a _follower_ for that _shard_. -b) If previously we were in case b), the _DBServer_ notices that it now holds - a _follower_ _replica_ of that _shard_ and it resynchronizes its data with the - new _leader_. The _replication factor_ constraint is now satisfied again, - and order is restored. - -The following example will give you an idea of how _failover_ -has been implemented in ArangoDB Cluster: - -1. The _leader_ of a _shard_ (let's name it _DBServer001_) is going down. -2. A _Coordinator_ is asked to return a document: - - 127.0.0.1:8530@_system> db.test.document("100069") - -3. The _Coordinator_ determines which server is responsible for this document - and finds _DBServer001_ -4. The _Coordinator_ tries to contact _DBServer001_ and timeouts because it is - not reachable. -5. After a short while the _supervision_ (running in parallel on the _Agency_) - will see that _heartbeats_ from _DBServer001_ are not coming in -6. The _supervision_ promotes one of the _followers_ (say _DBServer002_), that - is in sync, to be _leader_ and makes _DBServer001_ a _follower_. -7. As the _Coordinator_ continues trying to fetch the document it will see that - the _leader_ changed to _DBServer002_ -8. 
The _Coordinator_ tries to contact the new _leader_ (_DBServer002_) and returns the result:

    ```json
    {
        "_key" : "100069",
        "_id" : "test/100069",
        "_rev" : "513",
        "foo" : "bar"
    }
    ```
9. After a while the _supervision_ declares _DBServer001_ to be completely dead.
10. A new _follower_ is determined from the pool of _DBservers_.
11. The new _follower_ syncs its data from the _leader_ and order is restored.

Please note that there may still be timeouts. Depending on when exactly the request has been done (in regard to the _supervision_) and depending on the time needed to reconfigure the Cluster, the _Coordinator_ might fail with a timeout error.

Shard movement and resynchronization
------------------------------------

All _shard_ data synchronizations are done in an incremental way, such that resynchronizations are quick. This technology allows shards (_follower_ and _leader_ ones) to be moved between _DBServers_ without service interruptions. Therefore, an ArangoDB Cluster can move all the data on a specific _DBServer_ to other _DBServers_ and then shut down that server in a controlled way. This makes it possible to scale down an ArangoDB Cluster without service interruption, loss of fault tolerance or data loss. Furthermore, one can re-balance the distribution of the _shards_, either manually or automatically.

All these operations can be triggered via a REST/JSON API or via the graphical web UI. All fail-over operations are completely handled within the ArangoDB Cluster.

Microservices and zero administration
-------------------------------------

The design and capabilities of ArangoDB are geared towards usage in modern microservice architectures of applications. With the [Foxx services](../../../Foxx/README.md) it is very easy to deploy a data centric microservice within an ArangoDB Cluster.

In addition, one can deploy multiple instances of ArangoDB within the same project. One part of the project might need a scalable document store, another might need a graph database, and yet another might need the full power of a multi-model database actually mixing the various data models. There are enormous efficiency benefits to be reaped by being able to use a single technology for various roles in a project.

To simplify the life of the _devops_ in such a scenario, we try as much as possible to use a _zero administration_ approach for ArangoDB. A running ArangoDB Cluster is resilient against failures and essentially repairs itself in case of temporary failures.

Deployment
----------

An ArangoDB Cluster can be deployed in several ways, e.g. by manually starting all the needed instances, by using the tool [_Starter_](../../../Programs/Starter/README.md), in Docker, in Mesos or DC/OS, and in Kubernetes.

See the [Cluster Deployment](../../../Deployment/Cluster/README.md) chapter for instructions.

diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/DataModels.md b/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/DataModels.md deleted file mode 100644 index 0ef0ec668aa5..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/DataModels.md +++ /dev/null @@ -1,75 +0,0 @@

Different data models and scalability
=====================================

In this section we discuss scalability in the context of the different data models supported by ArangoDB.

Key/value pairs
---------------

The key/value store data model is the easiest to scale.
In ArangoDB, -this is implemented in the sense that a document collection always has -a primary key `_key` attribute and in the absence of further secondary -indexes the document collection behaves like a simple key/value store. - -The only operations that are possible in this context are single key -lookups and key/value pair insertions and updates. If `_key` is the -only sharding attribute then the sharding is done with respect to the -primary key and all these operations scale linearly. If the sharding is -done using different shard keys, then a lookup of a single key involves -asking all shards and thus does not scale linearly. - -Document store --------------- - -For the document store case even in the presence of secondary indexes -essentially the same arguments apply, since an index for a sharded -collection is simply the same as a local index for each shard. Therefore, -single document operations still scale linearly with the size of the -cluster, unless a special sharding configuration makes lookups or -write operations more expensive. - -For a deeper analysis of this topic see -[this blog post](https://mesosphere.com/blog/2015/11/30/arangodb-benchmark-dcos/) -in which good linear scalability of ArangoDB for single document operations -is demonstrated. - - -Complex queries and joins -------------------------- - -The AQL query language allows complex queries, using multiple -collections, secondary indexes as well as joins. In particular with -the latter, scaling can be a challenge, since if the data to be -joined resides on different machines, a lot of communication -has to happen. The AQL query execution engine organizes a data -pipeline across the cluster to put together the results in the -most efficient way. The query optimizer is aware of the cluster -structure and knows what data is where and how it is indexed. -Therefore, it can arrive at an informed decision about what parts -of the query ought to run where in the cluster. - -Nevertheless, for certain complicated joins, there are limits as -to what can be achieved. - - -Graph database --------------- - -Graph databases are particularly good at queries on graphs that involve -paths in the graph of an a priori unknown length. For example, finding -the shortest path between two vertices in a graph, or finding all -paths that match a certain pattern starting at a given vertex are such -examples. - -However, if the vertices and edges along the occurring paths are -distributed across the cluster, then a lot of communication is -necessary between nodes, and performance suffers. To achieve good -performance at scale, it is therefore necessary to get the -distribution of the graph data across the shards in the cluster -right. Most of the time, the application developers and users of -ArangoDB know best, how their graphs are structured. Therefore, -ArangoDB allows users to specify, according to which attributes -the graph data is sharded. A useful first step is usually to make -sure that the edges originating at a vertex reside on the same -cluster node as the vertex. diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Limitations.md b/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Limitations.md deleted file mode 100644 index 3c7a3acaa06a..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/Limitations.md +++ /dev/null @@ -1,14 +0,0 @@ -Cluster Limitations -=================== - -ArangoDB has no built-in limitations to horizontal scalability. 
The central resilient _Agency_ will easily sustain hundreds of _DBservers_ and _Coordinators_, and the usual database operations work completely decentrally and do not require assistance of the _Agency_.

Likewise, the supervision process in the _Agency_ can easily deal with lots of servers, since all its activities are not performance critical.

Obviously, an ArangoDB Cluster is limited by the available resources of CPU, memory, disk, and network bandwidth and latency.

diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/README.md deleted file mode 100644 index c5c852d9d738..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/README.md +++ /dev/null @@ -1,20 +0,0 @@

Cluster
=======

This _Chapter_ introduces ArangoDB's Cluster.

**Sections:**

- [Cluster Architecture](Architecture.md)
- [Data models](DataModels.md)
- [Cluster Limitations](Limitations.md)

For further information about the Cluster, please refer to the following sections included in other chapters of this Manual:

- [Cluster Deployment](../../../Deployment/Cluster/README.md)
- [Cluster Administration](../../../Administration/Cluster/README.md)
- [Cluster Troubleshooting](../../../Troubleshooting/Cluster/README.md)

Be sure to check out the [ArangoDB Cluster Administration Course](https://www.arangodb.com/arangodb-cluster-course/) as well.

diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/cluster_topology.png b/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/cluster_topology.png deleted file mode 100644 index a883e993ba4d..000000000000 Binary files a/Documentation/Books/Manual/Architecture/DeploymentModes/Cluster/cluster_topology.png and /dev/null differ

diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Applicability.md b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Applicability.md deleted file mode 100644 index 30d9db012f69..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Applicability.md +++ /dev/null @@ -1,15 +0,0 @@

# When to use it... and when not

The _datacenter to datacenter replication_ is a good solution in all cases where you want to replicate data from one cluster to another without the requirement that the data is available immediately in the other cluster.

The _datacenter to datacenter replication_ is not a good solution when one of the following applies:

- You want to replicate data from cluster A to cluster B and from cluster B to cluster A at the same time.
- You need synchronous replication between 2 clusters.
- There is no network connection between cluster A and B.
- You want complete control over which databases, collections & documents are replicated and which are not.

diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Introduction.md b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Introduction.md deleted file mode 100644 index 3a470a818bab..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Introduction.md +++ /dev/null @@ -1,67 +0,0 @@

# Introduction

At some point in the growth of a database, there comes a need for replicating it across multiple datacenters.

Reasons for that can be:

- Fallback in case of a disaster in one datacenter
- Regional availability
- Separation of concerns

And many more.
- -Starting from version 3.3, ArangoDB supports _datacenter to datacenter -replication_, via the _ArangoSync_ tool. - -ArangoDB's _datacenter to datacenter replication_ is a solution that enables you -to asynchronously replicate the entire structure and content in an ArangoDB Cluster -in one place to a Cluster in another place. Typically it is used from one datacenter -to another. It is possible to replicate to multiple other datacenters as well. -
It is not a solution for replicating single server instances. - -![ArangoDB DC2DC](dc2dc.png) - -The replication done by _ArangoSync_ is **asynchronous**. That means that when -a client is writing data into the source datacenter, it will consider the -request finished before the data has been replicated to the other datacenter. -The time needed to completely replicate changes to the other datacenter is -typically in the order of seconds, but this can vary significantly depending on -load, network & computer capacity. - -_ArangoSync_ performs replication in a **single direction** only. That means that -you can replicate data from cluster _A_ to cluster _B_ or from cluster _B_ to -cluster _A_, but never at the same time (one master, one or more slave clusters). -
Data modified in the destination cluster **will be lost!** - -Replication is a completely **autonomous** process. Once it is configured it is -designed to run 24/7 without frequent manual intervention. -
This does not mean that it requires no maintenance or attention at all. -
As with any distributed system some attention is needed to monitor its operation -and keep it secure (e.g. certificate & password rotation). - -In the event of an outage of the master cluster, user intervention is required -to either bring the master back up or to decide on making a slave cluster the -new master. There is no automatic failover as slave clusters lag behind the master -because of network latency etc. and resuming operation with the state of a slave -cluster can therefore result in the loss of recent writes. How much can be lost -largely depends on the data rate of the master cluster and the delay between -the master and the slaves. Slaves will typically be behind the master by a couple -of seconds or minutes. - -Once configured, _ArangoSync_ will replicate both **structure and data** of an -**entire cluster**. This means that there is no need to make additional configuration -changes when adding/removing databases or collections. -
Also meta data such as users, Foxx application & jobs are automatically replicated. - -A message queue is used for replication. You can use either of the following: - -- **DirectMQ** (recommended): - Message queue developed by ArangoDB in Go. Tailored for DC2DC replication - with efficient native networking routines. Available since ArangoSync version 0.5.0 - (shipped with ArangoDB Enterprise Edition v3.3.8). -- **Kafka**: - Complex general purpose message queue system. Requires Java and potentially - fine-tuning. A too small message size can cause problems with ArangoSync. - Supported by all ArangoSync versions (ArangoDB Enterprise Edition v3.3.0 and above). diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Limitations.md b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Limitations.md deleted file mode 100644 index de41a7d58d8d..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Limitations.md +++ /dev/null @@ -1,16 +0,0 @@ - -Limitations -=========== - -The _datacenter to datacenter replication_ setup in ArangoDB has a few limitations. -Some of these limitations may be removed in later versions of ArangoDB: - -- All the machines where the ArangoDB Server processes run must run the Linux - operating system using the AMD64 architecture. Clients can run from any platform. - -- All the machines where the ArangoSync Server processes run must run the Linux - operating system using the AMD64 architecture. - The ArangoSync command line tools is available for Linux, Windows & macOS. - -- The entire cluster is replicated. It is not possible to exclude specific - databases or collections from replication. diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/README.md deleted file mode 100644 index ccecfcb843f9..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/README.md +++ /dev/null @@ -1,25 +0,0 @@ - -# Datacenter to datacenter replication - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -This chapter introduces ArangoDB's _datacenter to datacenter replication_ (DC2DC). - -**Sections:** - -- [Introduction](Introduction.md) -- [Applicability](Applicability.md) -- [Requirements](Requirements.md) -- [Limitations](Limitations.md) - -For further information about _datacenter to datacenter replication_, please refer to the following sections included in other chapters of this Manual: - -- [Deployment](../../../Deployment/DC2DC/README.md) -- [Administration](../../../Administration/DC2DC/README.md) -- [Troubleshooting](../../../Troubleshooting/DC2DC/README.md) -- [Monitoring](../../../Monitoring/DC2DC/README.md) -- [Security](../../../Security/DC2DC/README.md) - diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Requirements.md b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Requirements.md deleted file mode 100644 index 0ab4e496320b..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/Requirements.md +++ /dev/null @@ -1,29 +0,0 @@ - -# Requirements - -To use _datacenter to datacenter replication_ you need the following: - -- Two datacenters, each running an ArangoDB Enterprise Edition cluster, - version 3.3 or higher, using the RocksDB storage engine. 
- A network connection between both datacenters with accessible endpoints for several components (see individual components for details).
- TLS certificates for ArangoSync master instances (can be self-signed).
- Optional (but recommended) TLS certificates for ArangoDB clusters (can be self-signed).
- A CA for client certificates of the _ArangoSync masters_ (typically self-signed).
- Client certificates for the _ArangoSync masters_ (typically self-signed).
- At least 2 instances of the _ArangoSync master_ in each datacenter.
- One instance of the _ArangoSync worker_ on every machine in each datacenter.

When using the `kafka` type message queue, you also need:

- TLS certificates for Kafka brokers (can be self-signed).

Note: In several places you will need an (x509) certificate.
The [Certificates](../../../Security/DC2DC/README.md#certificates) section provides more guidance for creating -and renewing these certificates. - -Besides the above list, you probably want to use the following: - -- An orchestrator to keep all components running, e.g. `systemd`. -- A log file collector for centralized collection & access to the logs of all components. -- A metrics collector & viewing solution such as _Prometheus_ + _Grafana_. diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/dc2dc.png b/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/dc2dc.png deleted file mode 100644 index ca27ab52f2c5..000000000000 Binary files a/Documentation/Books/Manual/Architecture/DeploymentModes/DC2DC/dc2dc.png and /dev/null differ diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md b/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md deleted file mode 100644 index adda1b1af3ec..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md +++ /dev/null @@ -1,248 +0,0 @@ -Master/Slave Architecture -========================= - -Introduction ------------- - -In a _Master/Slave_ setup one or more ArangoDB _slaves_ asynchronously replicate -from a _master_. - -The _master_ is the ArangoDB instance where all data-modification operations should -be directed to. The _slave_ is the ArangoDB instance that replicates the data from -the master. - -Components ----------- - -### Replication Logger - -**Purpose** - -The _replication logger_ will write all data-modification operations into the -_write-ahead log_. This log may then be read by clients to replay any data -modification on a different server. - -**Checking the state** - -To query the current state of the _logger_, use the *state* command: - - require("@arangodb/replication").logger.state(); - -The result might look like this: - -```js -{ - "state" : { - "running" : true, - "lastLogTick" : "2064735086", - "lastUncommittedLogTick" : "2064735086", - "totalEvents" : 2064735086, - "time" : "2019-03-01T11:38:39Z" - }, - "server" : { - "version" : "3.4.4", - "serverId" : "135694526467864", - "engine" : "rocksdb" - }, - "clients" : [ - { - "serverId" : "46402312160836", - "time" : "2019-03-01T11:38:39Z", - "expires" : "2019-03-01T13:38:39Z", - "lastServedTick" : "2064459411" - }, - { - "serverId" : "260321896124903", - "time" : "2019-03-01T11:29:45Z", - "expires" : "2019-03-01T13:29:45Z", - "lastServedTick" : "2002717896" - } - ] -} -``` - -The *running* attribute will always be true. In earlier versions of ArangoDB the -replication was optional and this could have been *false*. - -The *totalEvents* attribute indicates how many log events have been logged since -the start of the ArangoDB server. The *lastLogTick* value indicates the _id_ of the -last committed operation that was written to the server's _write-ahead log_. -It can be used to determine whether new operations were logged, and is also used -by the _replication applier_ for incremental fetching of data. The *lastUncommittedLogTick* -value contains the _id_ of the last uncommitted operation that was written to the -server's WAL. For the RocksDB storage engine, *lastLogTick* and *lastUncommittedLogTick* -are identical, as the WAL only contains committed operations. - -The *clients* attribute reveals which clients (slaves) have connected to the -master recently, and up to which tick value they caught up with the replication. 
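For monitoring purposes it can be useful to compare each client's *lastServedTick* with the logger's *lastLogTick*. The following arangosh sketch is only an illustration (it assumes the state layout shown above and treats tick values as plain integers, which is good enough for a rough estimate):

```js
// Minimal monitoring sketch: show how far each connected slave lags behind.
var replication = require("@arangodb/replication");
var loggerState = replication.logger.state();
var lastLogTick = parseInt(loggerState.state.lastLogTick, 10);

loggerState.clients.forEach(function (client) {
  var lag = lastLogTick - parseInt(client.lastServedTick, 10);
  print("slave " + client.serverId + " is about " + lag + " ticks behind");
});
```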
**Note**: The replication logger state can also be queried via the
[HTTP API](../../../../HTTP/Replications/index.html).

To query which data ranges are still available for replication clients to fetch, the logger provides the *firstTick* and *tickRanges* functions:

    require("@arangodb/replication").logger.firstTick();

This will return the minimum tick value that the server can provide to replication clients via its replication APIs. The *tickRanges* function returns the minimum and maximum tick values per logfile:

    require("@arangodb/replication").logger.tickRanges();

### Replication Applier

**Purpose**

The purpose of the _replication applier_ is to read data from a master database's event log and apply it locally. The _applier_ will check the master database for new operations periodically. It will perform an incremental synchronization, i.e. it will only ask the master for operations that occurred after the last synchronization.

The _replication applier_ does not get notified by the master database when there are "new" operations available, but instead uses the pull principle. It might thus take some time (the so-called *replication lag*) before an operation from the master database gets shipped to, and applied in, a slave database.

The _replication applier_ of a database runs in a separate thread. It may encounter problems when an operation from the master cannot be applied safely, or when the connection to the master database goes down (network outage, master database is down or unavailable, etc.). In this case, the database's _replication applier_ thread might terminate itself. It is then up to the administrator to fix the problem and restart the database's _replication applier_.

If the _replication applier_ cannot connect to the master database, or the communication fails at some point during the synchronization, the _replication applier_ will try to reconnect to the master database. It will give up reconnecting only after a configurable number of connection attempts.

The _replication applier_ state is queryable at any time by using the *state* command of the _applier_.
This will return the state of the _applier_ of the current database: - -```js -require("@arangodb/replication").applier.state(); -``` - -The result might look like this: - -```js -{ - "state" : { - "started" : "2019-03-01T11:36:33Z", - "running" : true, - "phase" : "running", - "lastAppliedContinuousTick" : "2050724544", - "lastProcessedContinuousTick" : "2050724544", - "lastAvailableContinuousTick" : "2050724546", - "safeResumeTick" : "2050694546", - "ticksBehind" : 2, - "progress" : { - "time" : "2019-03-01T11:36:33Z", - "message" : "fetching master log from tick 2050694546, last scanned tick 2050664547, first regular tick 2050544543, barrier: 0, open transactions: 1, chunk size 6291456", - "failedConnects" : 0 - }, - "totalRequests" : 2, - "totalFailedConnects" : 0, - "totalEvents" : 50010, - "totalDocuments" : 50000, - "totalRemovals" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "totalApplyTime" : 1.1071290969848633, - "averageApplyTime" : 1.1071290969848633, - "totalFetchTime" : 0.2129514217376709, - "averageFetchTime" : 0.10647571086883545, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2019-03-01T11:36:34Z" - }, - "server" : { - "version" : "3.4.4", - "serverId" : "46402312160836" - }, - "endpoint" : "tcp://master.example.org", - "database" : "test" -} -``` - -The *running* attribute indicates whether the _replication applier_ of the current -database is currently running and polling the master at *endpoint* for new events. - -The *started* attribute shows at what date and time the applier was started (if at all). - -The *progress.failedConnects* attribute shows how many failed connection attempts -the _replication applier_ currently has encountered in a row. In contrast, the -*totalFailedConnects* attribute indicates how many failed connection attempts the -_applier_ has made in total. The *totalRequests* attribute shows how many requests -the _applier_ has sent to the master database in total. - -The *totalEvents* attribute shows how many log events the _applier_ has read from the -master. The *totalDocuments* and *totalRemovals* attributes indicate how may document -operations the slave has applied locally. - -The attributes *totalApplyTime* and *totalFetchTime* show the total time the applier -spent for applying data batches locally, and the total time the applier waited on -data-fetching requests to the master, respectively. -The *averageApplyTime* and *averageFetchTime* attributes show the average times clocked -for these operations. Note that the average times will greatly be influenced by the -chunk size used in the applier configuration (bigger chunk sizes mean less requests to -the slave, but the batches will include more data and take more time to create -and apply). - -The *progress.message* sub-attribute provides a brief hint of what the _applier_ -currently does (if it is running). The *lastError* attribute also has an optional -*errorMessage* sub-attribute, showing the latest error message. The *errorNum* -sub-attribute of the *lastError* attribute can be used by clients to programmatically -check for errors. It should be *0* if there is no error, and it should be non-zero -if the _applier_ terminated itself due to a problem. 
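As a sketch of how a client could act on this information, the snippet below inspects the applier state and reports whether the applier stopped because of an error. It only relies on the *state* fields documented above; any restart logic is intentionally left out, since the appropriate action depends on the cause of the failure:

```js
// Hedged sketch: detect a stopped applier and surface its last error.
var applier = require("@arangodb/replication").applier;
var s = applier.state().state;

if (!s.running && s.lastError && s.lastError.errorNum !== 0) {
  print("applier stopped with error " + s.lastError.errorNum + ": " +
        (s.lastError.errorMessage || "no message"));
  // an administrator would fix the cause and then restart the applier
} else if (s.running) {
  print("applier running, " + (s.ticksBehind || 0) + " ticks behind");
}
```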
- -Below is an example of the state after the _replication applier_ terminated itself -due to (repeated) connection problems: - -```js -{ - "state" : { - "started" : "2019-03-01T11:51:18Z", - "running" : false, - "phase" : "inactive", - "lastAppliedContinuousTick" : "2101606350", - "lastProcessedContinuousTick" : "2101606370", - "lastAvailableContinuousTick" : "2101606370", - "safeResumeTick" : "2101606350", - "progress" : { - "time" : "2019-03-01T11:52:45Z", - "message" : "applier shut down", - "failedConnects" : 6 - }, - "totalRequests" : 19, - "totalFailedConnects" : 6, - "totalEvents" : 0, - "totalDocuments" : 0, - "totalRemovals" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "totalApplyTime" : 0, - "averageApplyTime" : 0, - "totalFetchTime" : 0.03386974334716797, - "averageFetchTime" : 0.0028224786122639975, - "lastError" : { - "errorNum" : 1400, - "time" : "2019-03-01T11:52:45Z", - "errorMessage" : "could not connect to master at tcp://127.0.0.1:8529 for URL /_api/wal/tail?chunkSize=6291456&barrier=0&from=2101606369&lastScanned=2101606370&serverId=46402312160836&includeSystem=true&includeFoxxQueues=false: Could not connect to 'http+tcp://127.0.0.1:852..." - }, - "time" : "2019-03-01T11:52:56Z" - }, - "server" : { - "version" : "3.4.4", - "serverId" : "46402312160836" - }, - "endpoint" : "tcp://master.example.org", - "database" : "test" -} -``` - -**Note**: the state of a database's replication applier is queryable via the HTTP -API, too. Please refer to [HTTP Interface for Replication](../../../../HTTP/Replications/index.html) -for more details. - diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Limitations.md b/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Limitations.md deleted file mode 100644 index b46b30dbb56c..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Limitations.md +++ /dev/null @@ -1,38 +0,0 @@ -Master/Slave Limitations -========================= - -The Master/Slave setup in ArangoDB has a few limitations. Some of these limitations -may be removed in later versions of ArangoDB: - -* there is no feedback from the slaves to the master. If a slave cannot apply an event - it got from the master, the master will have a different state of data. In this - case, the _replication applier_ on the slave will stop and report an error. Administrators - can then either "fix" the problem or re-sync the data from the master to the slave - and start the applier again. -* at the moment it is assumed that only the _replication applier_ executes write - operations on a slave. ArangoDB currently does not prevent users from carrying out - their own write operations on slaves, though this might lead to undefined behavior - and the _replication applier_ stopping. -* when a replication slave asks a master for log events, the replication master will - return all write operations for user-defined collections, but it will exclude write - operations for certain system collections. The following collections are excluded - intentionally from replication: *_apps*, *_trx*, *_replication*, *_configuration*, - *_jobs*, *_queues*, *_sessions*, *_foxxlog* and all statistics collections. - Write operations for the following system collections can be queried from a master: - *_aqlfunctions*, *_graphs*, *_users*. -* Foxx applications consist of database entries and application scripts in the file system. 
- The file system parts of Foxx applications are not tracked anywhere and thus not - replicated in current versions of ArangoDB. To replicate a Foxx application, it is - required to copy the application to the remote server and install it there using the - *foxx-manager* utility. -* master servers do not know which slaves are or will be connected to them. All servers - in a replication setup are currently only loosely coupled. There currently is no way - for a client to query which servers are present in a replication. -* failover must be handled by clients or client APIs. -* the _replication applier_ is single-threaded, but write operations on the master may - be executed in parallel if they affect different collections. Thus the _replication - applier_ might not be able to catch up with a very powerful and loaded master. -* replication is only supported between the two ArangoDB servers running the same - ArangoDB version. It is currently not possible to replicate between different ArangoDB - versions. -* a _replication applier_ cannot apply data from itself. diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/README.md deleted file mode 100644 index 000d40aa7184..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Master/Slave -============ - -This _Chapter_ introduces ArangoDB's _Master/Slave_ environment. - -**Sections:** - -- [Master/Slave Architecture](Architecture.md) -- [Master/Slave Limitations](Limitations.md) - -For further information about _Master/Slave_ in ArangoDB, please refer to the following sections included in other chapters of this Manual: - -- [Master/Slave Deployment](../../../Deployment/MasterSlave/README.md) -- [Master/Slave Administration](../../../Administration/MasterSlave/README.md) - diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/README.md deleted file mode 100644 index f487668a3df2..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/README.md +++ /dev/null @@ -1,13 +0,0 @@ -ArangoDB Deployment Modes -========================= - -- [Single Instance](SingleInstance/README.md) -- [Master/Slave](MasterSlave/README.md) -- [Active Failover](ActiveFailover/README.md) -- [Cluster](Cluster/README.md) -- [Multiple Datacenters](DC2DC/README.md) - -See also: - -- [Single Instance vs. Cluster](../SingleInstanceVsCluster.md) - diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/SingleInstance/README.md b/Documentation/Books/Manual/Architecture/DeploymentModes/SingleInstance/README.md deleted file mode 100644 index 51342a282cca..000000000000 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/SingleInstance/README.md +++ /dev/null @@ -1,19 +0,0 @@ -Single Instance -=============== - -Running a single instance of ArangoDB is the most simple way to get started. -It means to run the ArangoDB Server binary `arangod` stand-alone, without -replication, without failover opportunity and not as cluster together with -other nodes. - -You may run multiple processes of `arangod` side-by-side on the same machine as -single instances, as long as they are configured for different ports and data -folders. The official installers may not support multiple installations -side-by-side, but you can get archive packages and unpack them manually. 
- -The provided ArangoDB packages run as single instances out of the box. - -See also: - -- [Installation](../../../Installation/README.md) -- [Single Instance Deployment](../../../Deployment/SingleInstance/README.md) diff --git a/Documentation/Books/Manual/Architecture/README.md b/Documentation/Books/Manual/Architecture/README.md deleted file mode 100644 index 0acb86b8e607..000000000000 --- a/Documentation/Books/Manual/Architecture/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Architecture -============ - -- [ArangoDB Deployment Modes](DeploymentModes/README.md) -- [Single Instance vs. Cluster](SingleInstanceVsCluster.md) -- [Storage Engines](StorageEngines.md) -- [Replication](Replication/README.md) -- [Write-ahead log](WriteAheadLog.md) diff --git a/Documentation/Books/Manual/Architecture/Replication/README.md b/Documentation/Books/Manual/Architecture/Replication/README.md deleted file mode 100644 index c20300f0d42c..000000000000 --- a/Documentation/Books/Manual/Architecture/Replication/README.md +++ /dev/null @@ -1,107 +0,0 @@ -Replication -=========== - -Replication allows you to *replicate* data onto another machine. It -forms the base of all disaster recovery and failover features ArangoDB -offers. - -ArangoDB offers **synchronous** and **asynchronous** replication. - -Synchronous replication is used between the _DBServers_ of an ArangoDB -Cluster. - -Asynchronous replication is used: - -- between the _master_ and the _slave_ of an ArangoDB [_Master/Slave_](../../Architecture/DeploymentModes/MasterSlave/README.md) setup -- between the _Leader_ and the _Follower_ of an ArangoDB [_Active Failover_](../../Architecture/DeploymentModes/ActiveFailover/README.md) setup -- between multiple ArangoDB [Data Centers](../../Architecture/DeploymentModes/DC2DC/README.md) (inside the same Data Center replication is synchronous) - -Synchronous replication ------------------------ - -Synchronous replication only works within an ArangoDB Cluster and is typically -used for mission critical data which must be accessible at all -times. Synchronous replication generally stores a copy of a shard's -data on another DBServer and keeps it in sync. Essentially, when storing -data after enabling synchronous replication the Cluster will wait for -all replicas to write all the data before greenlighting the write -operation to the client. This will naturally increase the latency a -bit, since one more network hop is needed for each write. However, it -will enable the cluster to immediately fail over to a replica whenever -an outage has been detected, without losing any committed data, and -mostly without even signaling an error condition to the client. - -Synchronous replication is organized such that every _shard_ has a -_leader_ and `r-1` _followers_, where `r` denoted the replication -factor. The number of _followers_ can be controlled using the -`replicationFactor` parameter whenever you create a _collection_, the -`replicationFactor` parameter is the total number of copies being -kept, that is, it is one plus the number of _followers_. - -In addition to the `replicationFactor` we have a `minReplicationFactor` -the locks down a collection as soon as we have lost too many followers. - - -Asynchronous replication ------------------------- - -In ArangoDB any write operation is logged in the _write-ahead -log_. - -When using asynchronous replication _slaves_ (or _followers_) -connect to a _master_ (or _leader_) and apply locally all the events from -the master log in the same order. 
As a result the _slaves_ (_followers_) -will have the same state of data as the _master_ (_leader_). - -_Slaves_ (_followers_) are only eventually consistent with the _master_ (_leader_). - -Transactions are honored in replication, i.e. transactional write operations will -become visible on _slaves_ atomically. - -As all write operations will be logged to a master database's _write-ahead log_, the -replication in ArangoDB currently cannot be used for write-scaling. The main purposes -of the replication in current ArangoDB are to provide read-scalability and "hot backups" -of specific databases. - -It is possible to connect multiple _slave_ to the same _master_. _Slaves_ should be used -as read-only instances, and no user-initiated write operations -should be carried out on them. Otherwise data conflicts may occur that cannot be solved -automatically, and that will make the replication stop. - -In an asynchronous replication scenario slaves will _pull_ changes -from the _master_. _Slaves_ need to know to which _master_ they should -connect to, but a _master_ is not aware of the _slaves_ that replicate from it. -When the network connection between the _master_ and a _slave_ goes down, write -operations on the master can continue normally. When the network is up again, _slaves_ -can reconnect to the _master_ and transfer the remaining changes. This will -happen automatically provided _slaves_ are configured appropriately. - -Before 3.3.0 asynchronous replication was per database. Starting with 3.3.0 it is possible -to setup global replication. - -### Replication lag - -As decribed above, write operations are applied first in the _master_, and then applied -in the _slaves_. - -For example, let's assume a write operation is executed in the _master_ -at point in time _t0_. To make a _slave_ apply the same operation, it must first -fetch the write operation's data from master's write-ahead log, then parse it and -apply it locally. This will happen at some point in time after _t0_, let's say _t1_. - -The difference between _t1_ and _t0_ is called the _replication lag_, and it is unavoidable -in asynchronous replication. The amount of replication _lag_ depends on many factors, a -few of which are: - -* the network capacity between the _slaves_ and the _master_ -* the load of the _master_ and the _slaves_ -* the frequency in which _slaves_ poll the _master_ for updates - -Between _t0_ and _t1_, the state of data on the _master_ is newer than the state of data -on the _slaves_. At point in time _t1_, the state of data on the _master_ and _slaves_ -is consistent again (provided no new data modifications happened on the _master_ in -between). Thus, the replication will lead to an _eventually consistent_ state of data. - -### Replication overhead - -As the _master_ servers are logging any write operation in the _write-ahead-log_ anyway replication doesn't cause any extra overhead on the _master_. However it will of course cause some overhead for the _master_ to serve incoming read requests of the _slaves_. Returning the requested data is however a trivial task for the _master_ and should not result in a notable performance degration in production. diff --git a/Documentation/Books/Manual/Architecture/SingleInstanceVsCluster.md b/Documentation/Books/Manual/Architecture/SingleInstanceVsCluster.md deleted file mode 100644 index 2a20aebc98ba..000000000000 --- a/Documentation/Books/Manual/Architecture/SingleInstanceVsCluster.md +++ /dev/null @@ -1,157 +0,0 @@ -Single Instance vs. 
Cluster -=========================== - -In general, a single server configuration and a cluster configuration -of ArangoDB behave very similarly. However, there are differences due to -the different nature of these setups. This can lead to a discrepancy in behavior -between these two configurations. A summary of potential differences follows. - -See [Migrating from Single Instance to Cluster](../Deployment/MigratingSingleInstanceCluster.md) -for practical information. - -Locking and dead-lock prevention --------------------------------- - -In a single server configuration all data is local and dead-locks can -easily be detected. In a cluster configuration data is distributed to -many servers and some conflicts cannot be detected easily. Therefore -we have to do some things (like locking shards) sequentially and in a -strictly predefined order, to avoid dead-locks in this way by design. - -Document Keys -------------- - -In a cluster the *autoincrement* key generator is not supported. You -have to use the *traditional* or user defined keys. - -Indexes -------- - -### Unique constraints - -There are restrictions on the allowed unique constraints in a cluster. -Any unique constraint which cannot be checked locally on a per shard -basis is not allowed in a cluster setup. More concretely, unique -constraints in a cluster are only allowed in the following situations: - - - there is always a unique constraint on the primary key `_key`, if - the collection is not sharded by `_key`, then `_key` must be - automatically generated by the database and cannot be prescribed by - the client - - the collection has only one shard, in which case the same unique - constraints are allowed as in the single instance case - - if the collection is sharded by exactly one other attribute than - `_key`, then there can be a unique constraint on that attribute - -These restrictions are imposed, because otherwise checking for a unique -constraint violation would involve checking with all shards, which would have -a considerable performance impact. - -Renaming --------- - -It is not possible to rename collections or views in a cluster. - -AQL ---- - -The AQL syntax for single server and cluster is identical. However, -there is one additional requirement (regarding *with*) and possible -performance differences. - -### WITH - -The `WITH` keyword in AQL must be used to declare which collections -are used in the AQL. For most AQL requires the required collections -can be deduced from the query itself. However, with traversals this is -not possible, if edge collections are used directly. See -[AQL WITH operation](../../AQL/Operations/With.html) -for details. The `WITH` statement is not necessary when using named graphs -for the traversals. - -As deadlocks cannot be detected in a cluster environment easily, the -`WITH` keyword is mandatory for this particular situation in a cluster, -but not in a single server. - -### Performance - -Performance of AQL queries can vary between single server and cluster. -If a query can be distributed to many DBserver and executed in -parallel then cluster performance can be better. For example, if you -do a distributed `COLLECT` aggregation or a distributed `FILTER` -operation. - -On the other hand, if you do a join or a traversal and the data is not -local to one server then the performance can be worse compared to a -single server. This is especially true for traversal if the data is -not sharded with care. Our smart graph feature helps with this for -traversals. 
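To make the `WITH` requirement described above concrete, here is a hedged arangosh sketch of a cluster traversal over an anonymous graph; the collection names (`users` as vertices, `knows` as edges) are made up for illustration:

```js
// In a cluster, vertex collections touched by a traversal over a plain edge
// collection must be declared up front with WITH, otherwise the query fails.
db._query(`
  WITH users
  FOR v, e IN 1..2 OUTBOUND 'users/alice' knows
    RETURN v._key
`);
```

With a named graph instead of a bare edge collection, the `WITH` declaration is not needed.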
- -Single document operations can have a higher throughput in cluster but -will also have a higher latency, due to an additional network hop from -coordinator to dbserver. - -Any operation that needs to find documents by anything else but the -shard key will have to fan out to all shards, so it will be a lot -slower than when referring to the documents using the shard -key. Optimized lookups by shard key can only be used for equality -lookups, e.g. not for range lookups. - -### Memory usage - -Some query results must be built up in memory on a coordinator, for -example if a dataset needs to be sorted on the fly. This can relatively -easily overwhelm a coordinator if the dataset is sharded across multiple -dbservers. Use indexes and streaming cursors (>= 3.4) to circumvent this -problem. - -Transactions ------------- - -Using a single instance of ArangoDB, multi-document / multi-collection -queries are guaranteed to be fully ACID. This is more than many other -NoSQL database systems support. In cluster mode, single-document -operations are also fully ACID. Multi-document / multi-collection -queries in a cluster are not ACID, which is equally the case for -competing database systems. See [Transactions](../Transactions/README.md) -for details. - -Batch operations for multiple documents in the same collection are only -fully transactional in a single instance. - -Smart graphs ------------- - -In smart graphs there are restrictions on the values of the `_key` -attributes. Essentially, the `_key` attribute values for vertices must -be prefixed with the string value of the smart graph attribute and a -colon. A similar restriction applies for the edges. - -Foxx ----- - -Foxx apps run on the coordinators of a cluster. Since coordinators are -stateless, one must not use regular file accesses in Foxx apps in a -cluster. - -Agency ------- - -A cluster deployment needs a central, RAFT-based key/value store called -"the agency" to keep the current cluster configuration and manage -failover. Being RAFT-based, this is a real-time system. If your servers -running the agency instances (typically three or five) receive too much -load, the RAFT protocol stops working and the whole stability of the -cluster is endangered. If you foresee this problem, run the agency -instances on separate nodes. All this is not necessary in a single -server deployment. - -Dump/Restore ------------- - -At the time of this writing, the `arangodump` utility in a cluster -cannot guarantee a consistent snapshot across multiple shards or even -multiple collections. This is in line with most other current NoSQL -database systems. We are working on a consistent snapshot and -incremental backup capability for 3.5. In a single server, `arangodump` -produces a consistent snapshot. diff --git a/Documentation/Books/Manual/Architecture/StorageEngines.md b/Documentation/Books/Manual/Architecture/StorageEngines.md deleted file mode 100644 index 17fa9c7db7bf..000000000000 --- a/Documentation/Books/Manual/Architecture/StorageEngines.md +++ /dev/null @@ -1,186 +0,0 @@ -# Storage Engines - -At the very bottom of the ArangoDB database lies the storage -engine. The storage engine is responsible for persisting the documents -on disk, holding copies in memory, providing indexes and caches to -speed up queries. - -Up to version 3.1 ArangoDB only supported memory-mapped files (**MMFiles**) -as sole storage engine. Beginning with 3.2 ArangoDB has support for -pluggable storage engines. The second supported engine is **RocksDB** from -Facebook. 
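If you are unsure which engine a given server runs, arangosh can tell you. The following is a small sketch; the exact shape of the returned object may vary between versions:

```js
// Query the storage engine of the server arangosh is connected to.
var engine = db._engine();
print("storage engine: " + engine.name); // e.g. "rocksdb" or "mmfiles"
```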
- -Up to including versions 3.3, MMFiles was the default storage engine in -ArangoDB. Since version 3.4, the default storage engine is RocksDB. - -The engine must be selected for the whole server / cluster. It is not -possible to mix engines. The transaction handling and write-ahead-log -format in the individual engines is very different and therefore cannot -be mixed. - -{% hint 'tip' %} -For practical information on how to switch storage engine please refer to the -[Switching the storage engine](../Administration/Engine/SwitchEngine.md) -page. -{% endhint %} - -| MMFiles | RocksDB | -|---|---| -| optional | default | -| dataset needs to fit into memory | work with as much data as fits on disk | -| indexes in memory | hot set in memory, data and indexes on disk | -| slow restart due to index rebuilding | fast startup (no rebuilding of indexes) | -| volatile collections (only in memory, optional) | collection data always persisted | -| collection level locking (writes block reads) | concurrent reads and writes | - -*Blog article: [Comparing new RocksDB and MMFiles storage engines](https://www.arangodb.com/why-arangodb/comparing-rocksdb-mmfiles-storage-engines/)* - -## MMFiles - -The MMFiles (Memory-Mapped Files) engine is optimized for the use-case where -the data fits into the main memory. It allows for very fast concurrent -reads. However, writes block reads and locking is on collection -level. - -Indexes are always in memory and are rebuilt on startup. This -gives better performance but imposes a longer startup time. - -## RocksDB - -RocksDB is an embeddable persistent key-value store. It is a log -structure database and is optimized for fast storage. - -The RocksDB engine is optimized for large data-sets and allows for a -steady insert performance even if the data-set is much larger than the -main memory. Indexes are always stored on disk but caches are used to -speed up performance. RocksDB uses document-level locks allowing for -concurrent writes. Writes do not block reads. Reads do not block writes. - -### Advantages - -RocksDB is a very flexible engine that can be configured for various use cases. - -The main advantages of RocksDB are: - -- document-level locks -- support for large data-sets -- persistent indexes - -### Caveats - -RocksDB allows concurrent writes. However, when touching the same document a -write conflict is raised. This cannot happen with the MMFiles engine, therefore -applications that switch to RocksDB need to be prepared that such exception can -arise. It is possible to exclusively lock collections when executing AQL. This -will avoid write conflicts but also inhibits concurrent writes. - -Currently, another restriction is due to the transaction handling in -RocksDB. Transactions are limited in total size. If you have a statement -modifying a lot of documents it is necessary to commit data inbetween. This will -be done automatically for AQL by default. Transactions that get too big (in terms of -number of operations involved or the total size of data modified by the transaction) -will be committed automatically. Effectively this means that big user transactions -are split into multiple smaller RocksDB transactions that are committed individually. -The entire user transaction will not necessarily have ACID properties in this case. 
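The sketch below shows a large AQL write operation that relies on this behavior. The explicit `intermediateCommitCount` option illustrates the per-query override of the thresholds discussed next; the option name and the `events` collection are assumptions made for the sake of the example:

```js
// Hedged sketch: remove many documents from a hypothetical "events"
// collection. With the RocksDB engine the operation is committed in
// intermediate chunks rather than as one huge transaction.
db._query({
  query: "FOR doc IN events FILTER doc.stale == true REMOVE doc IN events",
  options: { intermediateCommitCount: 10000 } // assumed per-query override
});
```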
- -The threshold values for transaction sizes can be configured globally using the -startup options - -- [`--rocksdb.intermediate-commit-size`](../Programs/Arangod/Rocksdb.md#non-pass-through-options) - -- [`--rocksdb.intermediate-commit-count`](../Programs/Arangod/Rocksdb.md#non-pass-through-options) - -- [`--rocksdb.max-transaction-size`](../Programs/Arangod/Rocksdb.md#non-pass-through-options) - -It is also possible to override these thresholds per transaction. - -### Performance - -RocksDB is based on a log-structured merge tree. A good introduction can be -found in: - -- http://www.benstopford.com/2015/02/14/log-structured-merge-trees/ -- https://blog.acolyer.org/2014/11/26/the-log-structured-merge-tree-lsm-tree/ - -The basic idea is that data is organized in levels were each level is a factor -larger than the previous. New data will reside in smaller levels while old data -is moved down to the larger levels. This allows to support high rate of inserts -over an extended period. In principle it is possible that the different levels -reside on different storage media. The smaller ones on fast SSD, the larger ones -on bigger spinning disks. - -RocksDB itself provides a lot of different knobs to fine tune the storage -engine according to your use-case. ArangoDB supports the most common ones -using the options below. - -Performance reports for the storage engine can be found here: - -- https://github.com/facebook/rocksdb/wiki/performance-benchmarks -- https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide - -### ArangoDB options - -ArangoDB has a cache for the persistent indexes in RocksDB. The total size -of this cache is controlled by the option - - --cache.size - -RocksDB also has a cache for the blocks stored on disk. The size of -this cache is controlled by the option - - --rocksdb.block-cache-size - -ArangoDB distributes the available memory equally between the two -caches by default. - -ArangoDB chooses a size for the various levels in RocksDB that is -suitable for general purpose applications. - -RocksDB log strutured data levels have increasing size - - MEM: -- - L0: -- - L1: -- -- - L2: -- -- -- -- - ... - -New or updated Documents are first stored in memory. If this memtable -reaches the limit given by - - --rocksdb.write-buffer-size - -it will converted to an SST file and inserted at level 0. - -The following option controls the size of each level and the depth. - - --rocksdb.num-levels N - -Limits the number of levels to N. By default it is 7 and there is -seldom a reason to change this. A new level is only opened if there is -too much data in the previous one. - - --rocksdb.max-bytes-for-level-base B - -L0 will hold at most B bytes. - - --rocksdb.max-bytes-for-level-multiplier M - -Each level is at most M times as much bytes as the previous -one. Therefore the maximum number of bytes forlevel L can be -calculated as - - max-bytes-for-level-base * (max-bytes-for-level-multiplier ^ (L-1)) - -### Future - -RocksDB imposes a limit on the transaction size. It is optimized to -handle small transactions very efficiently, but is effectively limiting -the total size of transactions. - -ArangoDB currently uses RocksDB's transactions to implement the ArangoDB -transaction handling. Therefore the same restrictions apply for ArangoDB -transactions when using the RocksDB engine. - -We will improve this by introducing distributed transactions in a future -version of ArangoDB. 
This will allow handling large transactions as a -series of small RocksDB transactions and hence removing the size restriction. diff --git a/Documentation/Books/Manual/Architecture/WriteAheadLog.md b/Documentation/Books/Manual/Architecture/WriteAheadLog.md deleted file mode 100644 index 6b45741b358f..000000000000 --- a/Documentation/Books/Manual/Architecture/WriteAheadLog.md +++ /dev/null @@ -1,70 +0,0 @@ -Write-ahead log -=============== - -Both storage engines use a form of write ahead logging (WAL). - -Starting with version 2.2 ArangoDB stores all data-modification operation in -its write-ahead log. The write-ahead log is sequence of append-only files containing -all the write operations that were executed on the server. - -It is used to run data recovery after a server crash, and can also be used in -a replication setup when slaves need to replay the same sequence of operations as -on the master. - -MMFiles WAL Details -------------------- - -By default, each write-ahead logfile is 32 MiB in size. This size is configurable via the -option *--wal.logfile-size*. -When a write-ahead logfile is full, it is set to read-only, and following operations will -be written into the next write-ahead logfile. By default, ArangoDB will reserve some -spare logfiles in the background so switching logfiles should be fast. How many reserve -logfiles ArangoDB will try to keep available in the background can be controlled by the -configuration option *--wal.reserve-logfiles*. - -Data contained in full write-ahead files will eventually be transferred into the journals or -datafiles of collections. Only the "surviving" documents will be copied over. When all -remaining operations from a write-ahead logfile have been copied over into the journals -or datafiles of the collections, the write-ahead logfile can safely be removed if it is -not used for replication. - -Long-running transactions prevent write-ahead logfiles from being fully garbage-collected -because it is unclear whether a transaction will commit or abort. Long-running transactions -can thus block the garbage-collection progress and should therefore be avoided at -all costs. - -On a system that acts as a replication master, it is useful to keep a few of the -already collected write-ahead logfiles so replication slaves still can fetch data from -them if required. How many collected logfiles will be kept before they get deleted is -configurable via the option *--wal.historic-logfiles*. - -For all write-ahead log configuration options, please refer to the page -[Write-ahead log options](../Programs/Arangod/Wal.md). - - -RocksDB WAL Details -------------------- - -The options mentioned above only apply for MMFiles. The WAL in the RocksDB -storage engine works slightly differently. - -_Note:_ In rocksdb the WAL options are all prefixed with `--rocksdb.*`. -The `--wal.*` options do have no effect. - -The individual RocksDB WAL files are per default about 64 MiB big. -The size will always be proportionally sized to the value specified via -`--rocksdb.write-buffer-size`. The value specifies the amount of data to build -up in memory (backed by the unsorted WAL on disk) before converting it to a -sorted on-disk file. - -Larger values can increase performance, especially during bulk loads. -Up to `--rocksdb.max-write-buffer-number` write buffers may be held in memory -at the same time, so you may wish to adjust this parameter to control memory -usage. A larger write buffer will result in a longer recovery time the next -time the database is opened. 
- -The RocksDB WAL only contains committed transactions. This means you will never -see partial transactions in the replication log, but it also means transactions -are tracked completely in-memory. In practice this causes RocksDB transaction -sizes to be limited, for more information see the -[RocksDB Configuration](../Programs/Arangod/Rocksdb.md) diff --git a/Documentation/Books/Manual/BackupRestore/README.md b/Documentation/Books/Manual/BackupRestore/README.md deleted file mode 100644 index 2c3d5765c761..000000000000 --- a/Documentation/Books/Manual/BackupRestore/README.md +++ /dev/null @@ -1,35 +0,0 @@ -Backup and Restore -================== - -Backup and restore can be done via the tools -[_arangodump_](../Programs/Arangodump/README.md) and -[_arangorestore_](../Programs/Arangorestore/README.md). - -{% hint 'tip' %} -In order to speed up the _arangorestore_ performance in a Cluster environment, -the [Fast Cluster Restore](../Programs/Arangorestore/FastClusterRestore.md) -procedure is recommended. -{% endhint %} - -Performing frequent backups is important and a recommended best practices that -can allow you to recover your data in case unexpected problems occur. -Hardware failures, system crashes, or users mistakenly deleting data can always -happen. Furthermore, while a big effort is put into the development and testing -of ArangoDB (in all its deployment modes), ArangoDB, as any other software -product, might include bugs or errors and data loss could occur. -It is therefore important to regularly backup your data to be able to recover -and get up and running again in case of serious problems. - -Creating backups of your data before an ArangoDB upgrade is also a best practice. - -{% hint 'warning' %} -Making use of a high availability deployment mode of ArangoDB, like Active Failover, -Cluster or data-center to data-center replication, does not remove the need of -taking frequent backups, which are recommended also when using such deployment modes. -{% endhint %} - - - - - - diff --git a/Documentation/Books/Manual/DataModeling/Collections/CollectionMethods.md b/Documentation/Books/Manual/DataModeling/Collections/CollectionMethods.md deleted file mode 100644 index 547ceedb17e4..000000000000 --- a/Documentation/Books/Manual/DataModeling/Collections/CollectionMethods.md +++ /dev/null @@ -1,408 +0,0 @@ -Collection Methods -================== - -Drop ----- - - - - -drops a collection -`collection.drop(options)` - -Drops a *collection* and all its indexes and data. -In order to drop a system collection, an *options* object -with attribute *isSystem* set to *true* must be specified. - -**Note**: dropping a collection in a cluster, which is prototype for -sharing in other collections is prohibited. In order to be able to -drop such a collection, all dependent collections must be dropped -first. 
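For instance, in a cluster where one collection is sharded like another (via the `distributeShardsLike` option), the dependent collection has to be dropped first. The collection names below are hypothetical and the snippet is only a sketch of the required order:

```js
// Hypothetical cluster sketch: "orders" is sharded like "customers".
db._create("customers", { numberOfShards: 3 });
db._create("orders", { distributeShardsLike: "customers" });

// Dropping the prototype while "orders" still exists is prohibited,
// so the dependent collection goes first:
db.orders.drop();
db.customers.drop();
```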
- -**Examples** - - @startDocuBlockInline collectionDrop - @EXAMPLE_ARANGOSH_OUTPUT{collectionDrop} - ~ db._create("example"); - col = db.example; - col.drop(); - col; - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDrop - - @startDocuBlockInline collectionDropSystem - @EXAMPLE_ARANGOSH_OUTPUT{collectionDropSystem} - ~ db._create("_example", { isSystem: true }); - col = db._example; - col.drop({ isSystem: true }); - col; - ~ db._drop("example", { isSystem: true }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDropSystem - - -Truncate --------- - - - - -truncates a collection -`collection.truncate()` - -Truncates a *collection*, removing all documents but keeping all its -indexes. - - -**Examples** - - -Truncates a collection: - - @startDocuBlockInline collectionTruncate - @EXAMPLE_ARANGOSH_OUTPUT{collectionTruncate} - ~ db._create("example"); - col = db.example; - col.save({ "Hello" : "World" }); - col.count(); - col.truncate(); - col.count(); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionTruncate - - -Compact -------- - - - -Introduced in: v3.4.5 - -Compacts the data of a collection -`collection.compact()` - -Compacts the data of a collection in order to reclaim disk space. For the -MMFiles storage engine, the operation will reset the collection's last -compaction timestamp, so it will become a candidate for compaction. For the -RocksDB storage engine, the operation will compact the document and index -data by rewriting the underlying .sst files and only keeping the relevant -entries. - -Under normal circumstances running a compact operation is not necessary, -as the collection data will eventually get compacted anyway. However, in -some situations, e.g. after running lots of update/replace or remove -operations, the disk data for a collection may contain a lot of outdated data -for which the space shall be reclaimed. In this case the compaction operation -can be used. - - -Properties ----------- - - - -@startDocuBlock collectionProperties - -Figures -------- - - - - -returns the figures of a collection -`collection.figures()` - -Returns an object containing statistics about the collection. -**Note** : Retrieving the figures will always load the collection into -memory. - -* *alive.count*: The number of currently active documents in all datafiles and - journals of the collection. Documents that are contained in the - write-ahead log only are not reported in this figure. -* *alive.size*: The total size in bytes used by all active documents of the - collection. Documents that are contained in the write-ahead log only are - not reported in this figure. -- *dead.count*: The number of dead documents. This includes document - versions that have been deleted or replaced by a newer version. Documents - deleted or replaced that are contained in the write-ahead log only are not - reported in this figure. -* *dead.size*: The total size in bytes used by all dead documents. -* *dead.deletion*: The total number of deletion markers. Deletion markers - only contained in the write-ahead log are not reporting in this figure. -* *datafiles.count*: The number of datafiles. -* *datafiles.fileSize*: The total filesize of datafiles (in bytes). -* *journals.count*: The number of journal files. -* *journals.fileSize*: The total filesize of the journal files - (in bytes). -* *compactors.count*: The number of compactor files. -* *compactors.fileSize*: The total filesize of the compactor files - (in bytes). 
-* *shapefiles.count*: The number of shape files. This value is - deprecated and kept for compatibility reasons only. The value will always - be 0 since ArangoDB 2.0 and higher. -* *shapefiles.fileSize*: The total filesize of the shape files. This - value is deprecated and kept for compatibility reasons only. The value will - always be 0 in ArangoDB 2.0 and higher. -* *shapes.count*: The total number of shapes used in the collection. - This includes shapes that are not in use anymore. Shapes that are contained - in the write-ahead log only are not reported in this figure. -* *shapes.size*: The total size of all shapes (in bytes). This includes - shapes that are not in use anymore. Shapes that are contained in the - write-ahead log only are not reported in this figure. -* *attributes.count*: The total number of attributes used in the - collection. Note: the value includes data of attributes that are not in use - anymore. Attributes that are contained in the write-ahead log only are - not reported in this figure. -* *attributes.size*: The total size of the attribute data (in bytes). - Note: the value includes data of attributes that are not in use anymore. - Attributes that are contained in the write-ahead log only are not - reported in this figure. -* *indexes.count*: The total number of indexes defined for the - collection, including the pre-defined indexes (e.g. primary index). -* *indexes.size*: The total memory allocated for indexes in bytes. -* *lastTick*: The tick of the last marker that was stored in a journal - of the collection. This might be 0 if the collection does not yet have - a journal. -* *uncollectedLogfileEntries*: The number of markers in the write-ahead - log for this collection that have not been transferred to journals or - datafiles. -* *documentReferences*: The number of references to documents in datafiles - that JavaScript code currently holds. This information can be used for - debugging compaction and unload issues. -* *waitingFor*: An optional string value that contains information about - which object type is at the head of the collection's cleanup queue. This - information can be used for debugging compaction and unload issues. -* *compactionStatus.time*: The point in time the compaction for the collection - was last executed. This information can be used for debugging compaction - issues. -* *compactionStatus.message*: The action that was performed when the compaction - was last run for the collection. This information can be used for debugging - compaction issues. - -**Note**: collection data that are stored in the write-ahead log only are -not reported in the results. When the write-ahead log is collected, documents -might be added to journals and datafiles of the collection, which may modify -the figures of the collection. Also note that `waitingFor` and `compactionStatus` -may be empty when called on a coordinator in a cluster. - -Additionally, the filesizes of collection and index parameter JSON files are -not reported. These files should normally have a size of a few bytes -each. Please also note that the *fileSize* values are reported in bytes -and reflect the logical file sizes. Some filesystems may use optimizations -(e.g. sparse files) so that the actual physical file size is somewhat -different. Directories and sub-directories may also require space in the -file system, but this space is not reported in the *fileSize* results. - -That means that the figures reported do not reflect the actual disk -usage of the collection with 100% accuracy. 
The actual disk usage of -a collection is normally slightly higher than the sum of the reported -*fileSize* values. Still the sum of the *fileSize* values can still be -used as a lower bound approximation of the disk usage. - - -**Examples** - - - @startDocuBlockInline collectionFigures - @EXAMPLE_ARANGOSH_OUTPUT{collectionFigures} - ~ require("internal").wal.flush(true, true); - db.demo.figures() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionFigures - - -GetResponsibleShard -------------------- - - - - -returns the responsible shard for the given document. -`collection.getResponsibleShard(document)` - -Returns a string with the responsible shard's ID. Note that the -returned shard ID is the ID of responsible shard for the document's -shard key values, and it will be returned even if no such document exists. - -**Note**: this function can only be used on a coordinator in a cluster. - - -Load ----- - - - - -loads a collection -`collection.load()` - -Loads a collection into memory. - -**Note**: cluster collections are loaded at all times. - -**Examples** - - - @startDocuBlockInline collectionLoad - @EXAMPLE_ARANGOSH_OUTPUT{collectionLoad} - ~ db._create("example"); - col = db.example; - col.load(); - col; - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionLoad - - -Revision --------- - - - - -returns the revision id of a collection -`collection.revision()` - -Returns the revision id of the collection - -The revision id is updated when the document data is modified, either by -inserting, deleting, updating or replacing documents in it. - -The revision id of a collection can be used by clients to check whether -data in a collection has changed or if it is still unmodified since a -previous fetch of the revision id. - -The revision id returned is a string value. Clients should treat this value -as an opaque string, and only use it for equality/non-equality comparisons. - - -Path ----- - - -returns the physical path of the collection -`collection.path()` - -The *path* operation returns a string with the physical storage path for -the collection data. - -**Note**: this method will return nothing meaningful in a cluster. In a -single-server ArangoDB, this method will only return meaningful data for the -MMFiles engine. - - - -Checksum --------- - - - - -calculates a checksum for the data in a collection -`collection.checksum(withRevisions, withData)` - -The *checksum* operation calculates an aggregate hash value for all document -keys contained in collection *collection*. - -If the optional argument *withRevisions* is set to *true*, then the -revision ids of the documents are also included in the hash calculation. - -If the optional argument *withData* is set to *true*, then all user-defined -document attributes are also checksummed. Including the document data in -checksumming will make the calculation slower, but is more accurate. - -The checksum calculation algorithm changed in ArangoDB 3.0, so checksums from -3.0 and earlier versions for the same data will differ. - -**Note**: this method is not available in a cluster. - - -Unload ------- - - - - -unloads a collection -`collection.unload()` - -Starts unloading a collection from memory. Note that unloading is deferred -until all query have finished. - -**Note**: cluster collections cannot be unloaded. 
- -**Examples** - - - @startDocuBlockInline CollectionUnload - @EXAMPLE_ARANGOSH_OUTPUT{CollectionUnload} - ~ db._create("example"); - col = db.example; - col.unload(); - col; - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock CollectionUnload - - -Rename ------- - - - - -renames a collection -`collection.rename(new-name)` - -Renames a collection using the *new-name*. The *new-name* must not -already be used for a different collection. *new-name* must also be a -valid collection name. For more information on valid collection names please -refer to the [naming conventions](../NamingConventions/README.md). - -If renaming fails for any reason, an error is thrown. -If renaming the collection succeeds, then the collection is also renamed in -all graph definitions inside the `_graphs` collection in the current -database. - -**Note**: this method is not available in a cluster. - - -**Examples** - - - @startDocuBlockInline collectionRename - @EXAMPLE_ARANGOSH_OUTPUT{collectionRename} - ~ db._create("example"); - c = db.example; - c.rename("better-example"); - c; - ~ db._drop("better-example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionRename - - -Rotate ------- - - - - -rotates the current journal of a collection -`collection.rotate()` - -Rotates the current journal of a collection. This operation makes the -current journal of the collection a read-only datafile so it may become a -candidate for garbage collection. If there is currently no journal available -for the collection, the operation will fail with an error. - -**Note**: this method is specific for the MMFiles storage engine, and there -it is not available in a cluster. - -**Note**: please note that you need appropriate user permissions to execute this. - - To do the rename collections in first place you need to have administrative rights on the database - - To have access to the resulting renamed collection you either need to have access to - all collections of that database (`*`) or a main system administrator has to give you access to - the newly named one. - diff --git a/Documentation/Books/Manual/DataModeling/Collections/DatabaseMethods.md b/Documentation/Books/Manual/DataModeling/Collections/DatabaseMethods.md deleted file mode 100644 index 022d8675d5ac..000000000000 --- a/Documentation/Books/Manual/DataModeling/Collections/DatabaseMethods.md +++ /dev/null @@ -1,503 +0,0 @@ -Database Methods -================ - -Collection ----------- - - - - -returns a single collection or null -`db._collection(collection-name)` - -Returns the collection with the given name or null if no such collection -exists. - -`db._collection(collection-identifier)` - -Returns the collection with the given identifier or null if no such -collection exists. Accessing collections by identifier is discouraged for -end users. End users should access collections using the collection name. 
- - -**Examples** - - -Get a collection by name: - - @startDocuBlockInline collectionDatabaseNameKnown - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseNameKnown} - db._collection("demo"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseNameKnown - -Get a collection by id: - -``` -arangosh> db._collection(123456); -[ArangoCollection 123456, "demo" (type document, status loaded)] -``` - -Unknown collection: - - @startDocuBlockInline collectionDatabaseNameUnknown - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseNameUnknown} - db._collection("unknown"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseNameUnknown - - -Create ------- - - - - -creates a new document or edge collection -`db._create(collection-name)` - -Creates a new document collection named *collection-name*. -If the collection name already exists or if the name format is invalid, an -error is thrown. For more information on valid collection names please refer -to the [naming conventions](../NamingConventions/README.md). - -`db._create(collection-name, properties)` - -*properties* must be an object with the following attributes: - -- *waitForSync* (optional, default *false*): If *true* creating - a document will only return after the data was synced to disk. - -- *journalSize* (optional, default is a - configuration parameter: The maximal - size of a journal or datafile. Note that this also limits the maximal - size of a single object. Must be at least 1MB. - -- *isSystem* (optional, default is *false*): If *true*, create a - system collection. In this case *collection-name* should start with - an underscore. End users should normally create non-system collections - only. API implementors may be required to create system collections in - very special occasions, but normally a regular collection will do. - -- *isVolatile* (optional, default is *false*): If *true* then the - collection data is kept in-memory only and not made persistent. Unloading - the collection will cause the collection data to be discarded. Stopping - or re-starting the server will also cause full loss of data in the - collection. The collection itself will remain however (only the data is - volatile). Setting this option will make the resulting collection be - slightly faster than regular collections because ArangoDB does not - enforce any synchronization to disk and does not calculate any CRC - checksums for datafiles (as there are no datafiles). - This option is meaningful for the MMFiles storage engine only. - -- *keyOptions* (optional): additional options for key generation. If - specified, then *keyOptions* should be a JSON object containing the - following attributes (**note**: some of them are optional): - - *type*: specifies the type of the key generator. The currently - available generators are *traditional*, *autoincrement*, *uuid* and - *padded*. - The `traditional` key generator generates numerical keys in ascending order. - The `autoincrement` key generator generates numerical keys in ascending order, - the inital offset and the spacing can be configured (**note**: *autoincrement* is currently only - supported for non-sharded collections). - The `padded` key generator generates keys of a fixed length (16 bytes) in - ascending lexicographical sort order. This is ideal for usage with the _RocksDB_ - engine, which will slightly benefit keys that are inserted in lexicographically - ascending order. The key generator can be used in a single-server or cluster. 
- The `uuid` key generator generates universally unique 128 bit keys, which - are stored in hexadecimal human-readable format. This key generator can be used - in a single-server or cluster to generate "seemingly random" keys. The keys - produced by this key generator are not lexicographically sorted. - - *allowUserKeys*: if set to *true*, then it is allowed to supply - own key values in the *_key* attribute of a document. If set to - *false*, then the key generator will solely be responsible for - generating keys and supplying own key values in the *_key* attribute - of documents is considered an error. - - *increment*: increment value for *autoincrement* key generator. - Not used for other key generator types. - - *offset*: initial offset value for *autoincrement* key generator. - Not used for other key generator types. - -- *numberOfShards* (optional, default is *1*): in a cluster, this value - determines the number of shards to create for the collection. In a single - server setup, this option is meaningless. - -- *shardKeys* (optional, default is `[ "_key" ]`): in a cluster, this - attribute determines which document attributes are used to determine the - target shard for documents. Documents are sent to shards based on the - values they have in their shard key attributes. The values of all shard - key attributes in a document are hashed, and the hash value is used to - determine the target shard. Note that values of shard key attributes cannot - be changed once set. - This option is meaningless in a single server setup. - - When choosing the shard keys, one must be aware of the following - rules and limitations: In a sharded collection with more than - one shard it is not possible to set up a unique constraint on - an attribute that is not the one and only shard key given in - *shardKeys*. This is because enforcing a unique constraint - would otherwise make a global index necessary or need extensive - communication for every single write operation. Furthermore, if - *_key* is not the one and only shard key, then it is not possible - to set the *_key* attribute when inserting a document, provided - the collection has more than one shard. Again, this is because - the database has to enforce the unique constraint on the *_key* - attribute and this can only be done efficiently if this is the - only shard key by delegating to the individual shards. - -- *replicationFactor* (optional, default is 1): in a cluster, this - attribute determines how many copies of each shard are kept on - different DBServers. The value 1 means that only one copy (no - synchronous replication) is kept. A value of k means that - k-1 replicas are kept. Any two copies reside on different DBServers. - Replication between them is synchronous, that is, every write operation - to the "leader" copy will be replicated to all "follower" replicas, - before the write operation is reported successful. - - If a server fails, this is detected automatically and one of the - servers holding copies take over, usually without an error being - reported. - - When using the *Enterprise Edition* of ArangoDB the replicationFactor - may be set to "satellite" making the collection locally joinable - on every database server. This reduces the number of network hops - dramatically when using joins in AQL at the costs of reduced write - performance on these collections. 
- *minReplicationFactor* (optional, default is 1): in a cluster, this
  attribute determines how many copies of each shard are required
  to be in sync on the different DBServers. If fewer than this many copies
  are in sync in the cluster, the shard will refuse writes. The
  *minReplicationFactor* cannot be larger than *replicationFactor*.
  Please note: during server failures this might mean that writes are not
  possible until the failover has completed, and it might slow down writes,
  trading write performance for data durability.

- *distributeShardsLike*: distribute the shards of this collection in the
  same way as the shards of another collection. If this value is set,
  the attributes *replicationFactor*, *numberOfShards* and
  *shardingStrategy* are copied from the other collection.

- *shardingStrategy* (optional): specifies the name of the sharding
  strategy to use for the collection. Since ArangoDB 3.4 there are
  different sharding strategies to select from when creating a new
  collection. The selected *shardingStrategy* value will remain
  fixed for the collection and cannot be changed afterwards. This is
  important to make the collection keep its sharding settings and
  always find documents already distributed to shards using the same
  initial sharding algorithm.

  The available sharding strategies are:
  - `community-compat`: default sharding used by ArangoDB
    Community Edition before version 3.4
  - `enterprise-compat`: default sharding used by ArangoDB
    Enterprise Edition before version 3.4
  - `enterprise-smart-edge-compat`: default sharding used by smart edge
    collections in ArangoDB Enterprise Edition before version 3.4
  - `hash`: default sharding used for new collections starting from version 3.4
    (excluding smart edge collections)
  - `enterprise-hash-smart-edge`: default sharding used for new
    smart edge collections starting from version 3.4

  If no sharding strategy is specified, the default will be `hash` for
  all collections, and `enterprise-hash-smart-edge` for all smart edge
  collections (requires the *Enterprise Edition* of ArangoDB).
  Manually overriding the sharding strategy does not yet provide a
  benefit, but it may do so later if other sharding strategies are added.

  In single-server mode, the *shardingStrategy* attribute is meaningless
  and will be ignored.

- *smartJoinAttribute*: in an *Enterprise Edition* cluster, this attribute
  determines an attribute of the collection that must contain the shard key value
  of the referred-to smart join collection. Additionally, the sharding key
  for a document in this collection must contain the value of this attribute,
  followed by a colon, followed by the actual primary key of the document.

  This feature can only be used in the *Enterprise Edition* and requires the
  *distributeShardsLike* attribute of the collection to be set to the name
  of another collection. It also requires the *shardKeys* attribute of the
  collection to be set to a single shard key attribute, with an additional ':'
  at the end.
  A further restriction is that whenever documents are stored or updated in the
  collection, the value stored in the *smartJoinAttribute* must be a string.

`db._create(collection-name, properties, type)`

Specifies the optional *type* of the collection; it can either be *document*
or *edge*. The default is *document*. Instead of giving a type, you can also use
*db._createEdgeCollection* or *db._createDocumentCollection*.
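For illustration, here is a minimal arangosh sketch of the *type* argument and a
few of the cluster-related properties described above. The collection names and
values are placeholders, and the sharding-related options only take effect when
the command is executed against a cluster coordinator (in a single server they
are ignored):

```js
// Hypothetical collection names, for illustration only.

// An edge collection, created via the optional type argument:
var relations = db._create("relations", {}, "edge");
relations.type();   // 3 (edge collection); document collections return 2

// A sharded document collection; numberOfShards, shardKeys and
// replicationFactor are only meaningful in a cluster:
var users = db._create("users", {
  numberOfShards: 4,
  shardKeys: [ "country" ],
  replicationFactor: 2
});
users.properties(); // reports the sharding settings in a cluster

db._drop("relations");
db._drop("users");
```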
- -`db._create(collection-name, properties[, type], options)` - -As an optional third (if the *type* string is being omitted) or fourth -parameter you can specify an optional options map that controls how the -cluster will create the collection. These options are only relevant at -creation time and will not be persisted: - -- *waitForSyncReplication* (default: true) - When enabled the server will only report success back to the client - if all replicas have created the collection. Set to *false* if you want faster - server responses and don't care about full replication. - -- *enforceReplicationFactor* (default: true) - When enabled which means the server will check if there are enough replicas - available at creation time and bail out otherwise. Set to *false* to disable - this extra check. - -**Examples** - - -With defaults: - - @startDocuBlockInline collectionDatabaseCreateSuccess - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseCreateSuccess} - c = db._create("users"); - c.properties(); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseCreateSuccess - -With properties: - - @startDocuBlockInline collectionDatabaseCreateProperties - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseCreateProperties} - |c = db._create("users", { waitForSync : true, - journalSize : 1024 * 1204}); - c.properties(); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseCreateProperties - -With a key generator: - - @startDocuBlockInline collectionDatabaseCreateKey - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseCreateKey} - | db._create("users", - { keyOptions: { type: "autoincrement", offset: 10, increment: 5 } }); - db.users.save({ name: "user 1" }); - db.users.save({ name: "user 2" }); - db.users.save({ name: "user 3" }); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseCreateKey - -With a special key option: - - @startDocuBlockInline collectionDatabaseCreateSpecialKey - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseCreateSpecialKey} - db._create("users", { keyOptions: { allowUserKeys: false } }); - db.users.save({ name: "user 1" }); - db.users.save({ name: "user 2", _key: "myuser" }); // xpError(ERROR_ARANGO_DOCUMENT_KEY_UNEXPECTED) - db.users.save({ name: "user 3" }); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseCreateSpecialKey - - - - - -creates a new edge collection -`db._createEdgeCollection(collection-name)` - -Creates a new edge collection named *collection-name*. If the -collection name already exists an error is thrown. The default value -for *waitForSync* is *false*. - -`db._createEdgeCollection(collection-name, properties)` - -*properties* must be an object with the following attributes: - -- *waitForSync* (optional, default *false*): If *true* creating - a document will only return after the data was synced to disk. -- *journalSize* (optional, default is - "configuration parameter"): The maximal size of - a journal or datafile. Note that this also limits the maximal - size of a single object and must be at least 1MB. - - - - - - -creates a new document collection -`db._createDocumentCollection(collection-name)` - -Creates a new document collection named *collection-name*. If the -document name already exists and error is thrown. - - -All Collections ---------------- - - - - -returns all collections -`db._collections()` - -Returns all collections of the given database. 
- - -**Examples** - - - @startDocuBlockInline collectionsDatabaseName - @EXAMPLE_ARANGOSH_OUTPUT{collectionsDatabaseName} - ~ db._create("example"); - db._collections(); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionsDatabaseName - - - -Collection Name ---------------- - - - - -selects a collection from the vocbase -`db.collection-name` - -Returns the collection with the given *collection-name*. If no such -collection exists, create a collection named *collection-name* with the -default properties. - - -**Examples** - - - @startDocuBlockInline collectionDatabaseCollectionName - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseCollectionName} - ~ db._create("example"); - db.example; - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseCollectionName - - - -Drop ----- - - - - -drops a collection -`db._drop(collection)` - -Drops a *collection* and all its indexes and data. - -`db._drop(collection-identifier)` - -Drops a collection identified by *collection-identifier* with all its -indexes and data. No error is thrown if there is no such collection. - -`db._drop(collection-name)` - -Drops a collection named *collection-name* and all its indexes. No error -is thrown if there is no such collection. - -`db._drop(collection-name, options)` - -In order to drop a system collection, one must specify an *options* object -with attribute *isSystem* set to *true*. Otherwise it is not possible to -drop system collections. - -**Note**: cluster collection, which are prototypes for collections -with *distributeShardsLike* parameter, cannot be dropped. - -*Examples* - -Drops a collection: - - @startDocuBlockInline collectionDatabaseDropByObject - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseDropByObject} - ~ db._create("example"); - col = db.example; - db._drop(col); - col; - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseDropByObject - -Drops a collection identified by name: - - @startDocuBlockInline collectionDatabaseDropName - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseDropName} - ~ db._create("example"); - col = db.example; - db._drop("example"); - col; - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseDropName - -Drops a system collection - - @startDocuBlockInline collectionDatabaseDropSystem - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseDropSystem} - ~ db._create("_example", { isSystem: true }); - col = db._example; - db._drop("_example", { isSystem: true }); - col; - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseDropSystem - -Truncate --------- - - - - -truncates a collection -`db._truncate(collection)` - -Truncates a *collection*, removing all documents but keeping all its -indexes. - -`db._truncate(collection-identifier)` - -Truncates a collection identified by *collection-identified*. No error is -thrown if there is no such collection. - -`db._truncate(collection-name)` - -Truncates a collection named *collection-name*. No error is thrown if -there is no such collection. 
- - -**Examples** - - -Truncates a collection: - - @startDocuBlockInline collectionDatabaseTruncateByObject - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseTruncateByObject} - ~ db._create("example"); - col = db.example; - col.save({ "Hello" : "World" }); - col.count(); - db._truncate(col); - col.count(); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseTruncateByObject - -Truncates a collection identified by name: - - @startDocuBlockInline collectionDatabaseTruncateName - @EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseTruncateName} - ~ db._create("example"); - col = db.example; - col.save({ "Hello" : "World" }); - col.count(); - db._truncate("example"); - col.count(); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionDatabaseTruncateName - - diff --git a/Documentation/Books/Manual/DataModeling/Collections/README.md b/Documentation/Books/Manual/DataModeling/Collections/README.md deleted file mode 100644 index f2821272fa50..000000000000 --- a/Documentation/Books/Manual/DataModeling/Collections/README.md +++ /dev/null @@ -1,93 +0,0 @@ -JavaScript Interface to Collections -=================================== - -This is an introduction to ArangoDB's interface for collections and how to handle -collections from the JavaScript shell _arangosh_. For other languages see the -corresponding language API. - -The most important call is the call to create a new collection. - -Address of a Collection ------------------------ - -All collections in ArangoDB have a unique identifier and a unique -name. The namespace for collections is shared with views, so there cannot exist -a collection and a view with the same name in the same database. ArangoDB -internally uses the collection's unique identifier to look up collections. This -identifier, however, is managed by ArangoDB and the user has no control over it. -In order to allow users to use their own names, each collection also has a -unique name which is specified by the user. To access a collection from the user -perspective, the [collection name](../../Appendix/Glossary.md#collection-name) -should be used, i.e.: - -### Collection -`db._collection(collection-name)` - -A collection is created by a ["db._create"](DatabaseMethods.md) call. - -For example: Assume that the [collection identifier](../../Appendix/Glossary.md#collection-identifier) is *7254820* and the name is -*demo*, then the collection can be accessed as: - - db._collection("demo") - -If no collection with such a name exists, then *null* is returned. - -There is a short-cut that can be used for non-system collections: - -### Collection name -`db.collection-name` - -This call will either return the collection named *db.collection-name* or create -a new one with that name and a set of default properties. - -**Note**: Creating a collection on the fly using *db.collection-name* is -not recommend and does not work in _arangosh_. To create a new collection, please -use - -### Create -`db._create(collection-name)` - -This call will create a new collection called *collection-name*. -This method is a database method and is documented in detail at [Database Methods](DatabaseMethods.md#create) - -### Synchronous replication - -Starting in ArangoDB 3.0, the distributed version offers synchronous -replication, which means that there is the option to replicate all data -automatically within the ArangoDB cluster. 
This is configured for sharded -collections on a per collection basis by specifying a "replication factor" -when the collection is created. A replication factor of k means that -altogether k copies of each shard are kept in the cluster on k different -servers, and are kept in sync. That is, every write operation is automatically -replicated on all copies. - -This is organized using a leader/follower model. At all times, one of the -servers holding replicas for a shard is "the leader" and all others -are "followers", this configuration is held in the Agency (see -[Cluster](../../Architecture/DeploymentModes/Cluster/README.md) for details of the ArangoDB -cluster architecture). Every write operation is sent to the leader -by one of the coordinators, and then replicated to all followers -before the operation is reported to have succeeded. The leader keeps -a record of which followers are currently in sync. In case of network -problems or a failure of a follower, a leader can and will drop a follower -temporarily after 3 seconds, such that service can resume. In due course, -the follower will automatically resynchronize with the leader to restore -resilience. - -If a leader fails, the cluster Agency automatically initiates a failover -routine after around 15 seconds, promoting one of the followers to -leader. The other followers (and the former leader, when it comes back), -automatically resynchronize with the new leader to restore resilience. -Usually, this whole failover procedure can be handled transparently -for the coordinator, such that the user code does not even see an error -message. - -Obviously, this fault tolerance comes at a cost of increased latency. -Each write operation needs an additional network roundtrip for the -synchronous replication of the followers, but all replication operations -to all followers happen concurrently. This is, why the default replication -factor is 1, which means no replication. - -For details on how to switch on synchronous replication for a collection, -see the database method `db._create(collection-name)` in the section about -[Database Methods](DatabaseMethods.md#create). diff --git a/Documentation/Books/Manual/DataModeling/Concepts.md b/Documentation/Books/Manual/DataModeling/Concepts.md deleted file mode 100644 index a9e41547e358..000000000000 --- a/Documentation/Books/Manual/DataModeling/Concepts.md +++ /dev/null @@ -1,82 +0,0 @@ -Concepts -======== - -Database Interaction --------------------- - -ArangoDB is a database that serves documents to clients. These documents are -transported using [JSON](https://en.wikipedia.org/wiki/JSON) via a TCP connection, -using the HTTP protocol. A [REST API](https://en.wikipedia.org/wiki/Representational_state_transfer) -is provided to interact with the database system. - -The [web interface](../Programs/WebInterface/README.md) that comes with -ArangoDB, called *Aardvark*, provides graphical user interface that is easy to use. -An [interactive shell](../Programs/Arangosh/README.md), called *Arangosh*, is also -shipped. In addition, there are so called [drivers](../../Drivers/index.html) -that make it easy to use the database system in various environments and -programming languages. All these tools use the HTTP interface of the server and -remove the necessity to roll own low-level code for basic communication in most -cases. 
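For example, even arangosh itself just issues HTTP requests against the REST
API. The following hedged sketch uses the shell's connection object to fetch
the server version (the exact fields in the response may vary by server
version):

```js
// arangosh sketch: "arango" is the current server connection and can
// issue raw HTTP requests against the REST API.
var version = arango.GET("/_api/version");
print(version.server + " " + version.version);
// Drivers and the web interface talk to the same HTTP endpoints.
```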
- -Data model ----------- - -The documents you can store in ArangoDB closely follow the JSON format, -although they are stored in a binary format called [VelocyPack](https://github.com/arangodb/velocypack#readme). -A **document** contains zero or more attributes, each of these attributes having -a value. A value can either be an atomic type, i. e. number, string, boolean -or null, or a compound type, i.e. an array or embedded document / object. -Arrays and sub-objects can contain all of these types, which means that -arbitrarily nested data structures can be represented in a single document. - -Documents are grouped into **collections**. A collection contains zero or more -documents. If you are familiar with relational database management systems (RDBMS) -then it is safe to compare collections to tables and documents to rows. The -difference is that in a traditional RDBMS, you have to define columns before -you can store records in a table. Such definitions are also known as schemas. -ArangoDB is schema-less, which means that there is no need to define what -attributes a document can have. Every single document can have a completely -different structure and still be stored together with other documents in a -single collection. In practice, there will be common denominators among the -documents in a collection, but the database system itself doesn't force you to -limit yourself to a certain data structure. - -There are two types of collections: **document collection** (also refered to as -*vertex collections* in the context of graphs) as well as **edge collections**. -Edge collections store documents as well, but they include two special attributes, -*_from* and *_to*, which are used to create relations between documents. -Usually, two documents (**vertices**) stored in document collections are linked -by a document (**edge**) stored in an edge collection. This is ArangoDB's graph -data model. It follows the mathematical concept of a directed, labeled graph, -except that edges don't just have labels, but are full-blown documents. - -Collections exist inside of **databases**. There can be one or many databases. -Different databases are usually used for multi tenant setups, as the data inside -them (collections, documents etc.) is isolated from one another. The default -database *_system* is special, because it cannot be removed. Database users -are managed in this database, and their credentials are valid for all databases -of a server instance. - -Similarly **databases** may also contain **view** entities. A -[View](Views/README.md) in its simplest form can be seen as a read-only -array or collection of documents. The view concept quite closely matches a -similarly named concept available in most relational database management systems -(RDBMS). Each view entity usually maps some implementation specific document -transformation, (possibly identity), onto documents from zero or more -collections. - -Data Retrieval --------------- - -**Queries** are used to filter documents based on certain criteria, to compute -new data, as well as to manipulate or delete existing documents. Queries can be -as simple as a "query by example" or as complex as ["joins"](../../AQL/Examples/Join.html) -using many collections or traversing graph structures. They are written in -the [ArangoDB Query Language](../../AQL/index.html) (AQL). - -**Cursors** are used to iterate over the result of queries, so that you get -easily processable batches instead of one big hunk. - -**Indexes** are used to speed up searches. 
There are various types of indexes, -such as [hash indexes](../Indexing/Hash.md) -and [geo-spatial indexes](../Indexing/Geo.md). diff --git a/Documentation/Books/Manual/DataModeling/Databases/Notes.md b/Documentation/Books/Manual/DataModeling/Databases/Notes.md deleted file mode 100644 index c1cbfd79397f..000000000000 --- a/Documentation/Books/Manual/DataModeling/Databases/Notes.md +++ /dev/null @@ -1,19 +0,0 @@ -Notes about Databases -===================== - -Please keep in mind that each database contains its own system collections, -which need to be set up when a database is created. This will make the creation -of a database take a while. - -Replication is either configured on a -[per-database level](../../Administration/MasterSlave/DatabaseSetup.md) -or on [server level](../../Administration/MasterSlave/ServerLevelSetup.md). -In a per-database setup, any replication logging or applying for a new database -must be configured explicitly after a new database has been created, whereas all -databases are automatically replicated in case of the server-level setup using the global replication applier. - -Foxx applications -are also available only in the context of the database they have been installed -in. A new database will only provide access to the system applications shipped -with ArangoDB (that is the web interface at the moment) and no other Foxx -applications until they are explicitly installed for the particular database. diff --git a/Documentation/Books/Manual/DataModeling/Databases/README.md b/Documentation/Books/Manual/DataModeling/Databases/README.md deleted file mode 100644 index f04f9f58b9c1..000000000000 --- a/Documentation/Books/Manual/DataModeling/Databases/README.md +++ /dev/null @@ -1,31 +0,0 @@ -Handling Databases -================== - -This is an introduction to managing databases in ArangoDB from within -JavaScript. - -When you have an established connection to ArangoDB, the current -database can be changed explicitly using the *db._useDatabase()* -method. This will switch to the specified database (provided it -exists and the user can connect to it). From this point on, any -following action in the same shell or connection will use the -specified database, unless otherwise specified. - -*Note*: If the database is changed, client drivers need to store the -current database name on their side, too. This is because connections -in ArangoDB do not contain any state information. All state information -is contained in the HTTP request/response data. - -To connect to a specific database after arangosh has started use the command -described above. It is also possible to specify a database name when invoking -arangosh. For this purpose, use the command-line parameter *--server.database*, -e.g. - - > arangosh --server.database test - -Please note that commands, actions, scripts or AQL queries should never -access multiple databases, even if they exist. The only intended and -supported way in ArangoDB is to use one database at a time for a command, -an action, a script or a query. Operations started in one database must -not switch the database later and continue operating in another. 
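A short arangosh sketch of switching databases (hedged; the database name
`test` is a placeholder, it must already exist, and the current user must be
allowed to access it):

```js
// From the _system database, list all databases visible to the current user.
db._databases();

// Switch the current shell session to another database and verify it.
db._useDatabase("test");  // "test" is a placeholder name
db._name();               // => "test"

// Switch back when done.
db._useDatabase("_system");
```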
- diff --git a/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md b/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md deleted file mode 100644 index 688351039278..000000000000 --- a/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md +++ /dev/null @@ -1,205 +0,0 @@ -Working with Databases -====================== - -Database Methods ----------------- - -The following methods are available to manage databases via JavaScript. -Please note that several of these methods can be used from the _system -database only. - -### Name - - - -return the database name -`db._name()` - -Returns the name of the current database as a string. - - -**Examples** - - -@startDocuBlockInline dbName -@EXAMPLE_ARANGOSH_OUTPUT{dbName} - require("@arangodb").db._name(); -@END_EXAMPLE_ARANGOSH_OUTPUT -@endDocuBlock dbName - - -### ID - - - -return the database id -`db._id()` - -Returns the id of the current database as a string. - - -**Examples** - - -@startDocuBlockInline dbId -@EXAMPLE_ARANGOSH_OUTPUT{dbId} - require("@arangodb").db._id(); -@END_EXAMPLE_ARANGOSH_OUTPUT -@endDocuBlock dbId - - -### Path - - - -return the path to database files -`db._path()` - -Returns the filesystem path of the current database as a string. - - -**Examples** - - -@startDocuBlockInline dbPath -@EXAMPLE_ARANGOSH_OUTPUT{dbPath} - require("@arangodb").db._path(); -@END_EXAMPLE_ARANGOSH_OUTPUT -@endDocuBlock dbPath - - -### isSystem - - - -return the database type -`db._isSystem()` - -Returns whether the currently used database is the *_system* database. -The system database has some special privileges and properties, for example, -database management operations such as create or drop can only be executed -from within this database. Additionally, the *_system* database itself -cannot be dropped. - - -### Use Database - - - -change the current database -`db._useDatabase(name)` - -Changes the current database to the database specified by *name*. Note -that the database specified by *name* must already exist. - -Changing the database might be disallowed in some contexts, for example -server-side actions (including Foxx). - -When performing this command from arangosh, the current credentials (username -and password) will be re-used. These credentials might not be valid to -connect to the database specified by *name*. Additionally, the database -only be accessed from certain endpoints only. In this case, switching the -database might not work, and the connection / session should be closed and -restarted with different username and password credentials and/or -endpoint data. - - -### List Databases - - - -return the list of all existing databases -`db._databases()` - -Returns the list of all databases. This method can only be used from within -the *_system* database. - - -### Create Database - - - -create a new database -`db._createDatabase(name, options, users)` - -Creates a new database with the name specified by *name*. -There are restrictions for database names -(see [DatabaseNames](../NamingConventions/DatabaseNames.md)). - -Note that even if the database is created successfully, there will be no -change into the current database to the new database. Changing the current -database must explicitly be requested by using the -*db._useDatabase* method. - -The *options* attribute currently has no meaning and is reserved for -future use. - -The optional *users* attribute can be used to create initial users for -the new database. If specified, it must be a list of user objects. 
Each user -object can contain the following attributes: - -* *username*: the user name as a string. This attribute is mandatory. -* *passwd*: the user password as a string. If not specified, then it defaults - to an empty string. -* *active*: a boolean flag indicating whether the user account should be - active or not. The default value is *true*. -* *extra*: an optional JSON object with extra user information. The data - contained in *extra* will be stored for the user but not be interpreted - further by ArangoDB. - -If no initial users are specified, a default user *root* will be created -with an empty string password. This ensures that the new database will be -accessible via HTTP after it is created. - -You can create users in a database if no initial user is specified. Switch -into the new database (username and password must be identical to the current -session) and add or modify users with the following commands. - -```js - require("@arangodb/users").save(username, password, true); - require("@arangodb/users").update(username, password, true); - require("@arangodb/users").remove(username); -``` -Alternatively, you can specify user data directly. For example: - -```js - db._createDatabase("newDB", {}, [{ username: "newUser", passwd: "123456", active: true}]) -``` - -Those methods can only be used from within the *_system* database. - - -### Drop Database - - - -drop an existing database -`db._dropDatabase(name)` - -Drops the database specified by *name*. The database specified by -*name* must exist. - -**Note**: Dropping databases is only possible from within the *_system* -database. The *_system* database itself cannot be dropped. - -Databases are dropped asynchronously, and will be physically removed if -all clients have disconnected and references have been garbage-collected. - -### Engine - -retrieve the storage engine type used by the server -`db._engine()` - -Returns the name of the storage engine in use (`mmfiles` or `rocksdb`), as well -as a list of supported features (types of indexes and -[dfdb](../../Programs/Arango-dfdb/README.md)). - -### Engine statistics - -retrieve statistics related to the storage engine (rocksdb) -`db._engineStats()` - -Returns some statistics related to the storage engine activity, including figures -about data size, cache usage, etc. - -**Note**: Currently this only produces useful output for the RocksDB engine. diff --git a/Documentation/Books/Manual/DataModeling/Documents/DatabaseMethods.md b/Documentation/Books/Manual/DataModeling/Documents/DatabaseMethods.md deleted file mode 100644 index b3af37ec8f6b..000000000000 --- a/Documentation/Books/Manual/DataModeling/Documents/DatabaseMethods.md +++ /dev/null @@ -1,345 +0,0 @@ -Database Methods -================ - -Document --------- - - - - -`db._document(object)` - -The *_document* method finds a document given an object *object* -containing the *_id* attribute. The method returns -the document if it can be found. - -An error is thrown if *_rev* is specified but the document found has a -different revision already. An error is also thrown if no document exists -with the given *_id*. - -Please note that if the method is executed on the arangod server (e.g. from -inside a Foxx application), an immutable document object will be returned -for performance reasons. It is not possible to change attributes of this -immutable object. To update or patch the returned document, it needs to be -cloned/copied into a regular JavaScript object first. 
This is not necessary -if the *_document* method is called from out of arangosh or from any other -client. - -`db._document(document-handle)` - -As before. Instead of *object* a *document-handle* can be passed as -first argument. No revision can be specified in this case. - - -**Examples** - - -Returns the document: - - @startDocuBlockInline documentsDocumentName - @EXAMPLE_ARANGOSH_OUTPUT{documentsDocumentName} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "12345"}); - db._document("example/12345"); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsDocumentName - - - -Exists ------- - - - - -`db._exists(object)` - -The *_exists* method determines whether a document exists given an object -`object` containing the *_id* attribute. - -An error is thrown if *_rev* is specified but the document found has a -different revision already. - -Instead of returning the found document or an error, this method will -only return an object with the attributes *_id*, *_key* and *_rev*, or -*false* if no document with the given *_id* or *_key* exists. It can -thus be used for easy existence checks. - -This method will throw an error if used improperly, e.g. when called -with a non-document handle, a non-document, or when a cross-collection -request is performed. - -`db._exists(document-handle)` - -As before. Instead of *object* a *document-handle* can be passed as -first argument. - -**Changes in 3.0 from 2.8:** - -In the case of a revision mismatch *_exists* now throws an error instead -of simply returning *false*. This is to make it possible to tell the -difference between a revision mismatch and a non-existing document. - - -Replace -------- - - - - -`db._replace(selector, data)` - -Replaces an existing document described by the *selector*, which must -be an object containing the *_id* attribute. There must be -a document with that *_id* in the current database. This -document is then replaced with the *data* given as second argument. -Any attribute *_id*, *_key* or *_rev* in *data* is ignored. - -The method returns a document with the attributes *_id*, *_key*, *_rev* -and *_oldRev*. The attribute *_id* contains the document handle of the -updated document, the attribute *_rev* contains the document revision of -the updated document, the attribute *_oldRev* contains the revision of -the old (now replaced) document. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`collection.replace(selector, data, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. 
- - *returnNew*: If this flag is set to *true*, the complete new document - is returned in the output under the attribute *new*. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - -`db._replace(document-handle, data)` - -`db._replace(document-handle, data, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision precondition is tested. - - -**Examples** - - -Create and replace a document: - - @startDocuBlockInline documentsDocumentReplace - @EXAMPLE_ARANGOSH_OUTPUT{documentsDocumentReplace} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - a2 = db._replace(a1, { a : 2 }); - a3 = db._replace(a1, { a : 3 }); // xpError(ERROR_ARANGO_CONFLICT); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsDocumentReplace - - -**Changes in 3.0 from 2.8:** - -The options *silent*, *returnNew* and *returnOld* are new. - - -Update ------- - - - -`db._update(selector, data)` - -Updates an existing document described by the *selector*, which must -be an object containing the *_id* attribute. There must be -a document with that *_id* in the current database. This -document is then patched with the *data* given as second argument. -Any attribute *_id*, *_key* or *_rev* in *data* is ignored. - -The method returns a document with the attributes *_id*, *_key*, *_rev* -and *_oldRev*. The attribute *_id* contains the document handle of the -updated document, the attribute *_rev* contains the document revision of -the updated document, the attribute *_oldRev* contains the revision of -the old (now updated) document. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`db._update(selector, data, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. - - *returnNew*: If this flag is set to *true*, the complete new document - is returned in the output under the attribute *new*. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - - *keepNull*: The optional *keepNull* parameter can be used to modify - the behavior when handling *null* values. Normally, *null* values - are stored in the database. By setting the *keepNull* parameter to - *false*, this behavior can be changed so that all attributes in - *data* with *null* values will be removed from the target document. 
- - *mergeObjects*: Controls whether objects (not arrays) will be - merged if present in both the existing and the patch document. If - set to *false*, the value in the patch document will overwrite the - existing document's value. If set to *true*, objects will be merged. - The default is *true*. - - -`db._update(document-handle, data)` - -`db._update(document-handle, data, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision precondition is tested. - - -**Examples** - - -Create and update a document: - - @startDocuBlockInline documentDocumentUpdate - @EXAMPLE_ARANGOSH_OUTPUT{documentDocumentUpdate} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - a2 = db._update(a1, { b : 2 }); - a3 = db._update(a1, { c : 3 }); // xpError(ERROR_ARANGO_CONFLICT); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentDocumentUpdate - - -**Changes in 3.0 from 2.8:** - -The options *silent*, *returnNew* and *returnOld* are new. - - -Remove ------- - - - - -`db._remove(selector)` - -Removes a document described by the *selector*, which must be an object -containing the *_id* attribute. There must be a document with -that *_id* in the current database. This document is then -removed. - -The method returns a document with the attributes *_id*, *_key* and *_rev*. -The attribute *_id* contains the document handle of the -removed document, the attribute *_rev* contains the document revision of -the removed eocument. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`db._remove(selector, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - -`db._remove(document-handle)` - -`db._remove(document-handle, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision check is performed. 
- -**Examples** - - -Remove a document: - - @startDocuBlockInline documentsCollectionRemoveSuccess - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionRemoveSuccess} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - db._remove(a1); - db._remove(a1); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND); - db._remove(a1, {overwrite: true}); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionRemoveSuccess - -Remove the document in the revision `a1` with a conflict: - - @startDocuBlockInline documentsCollectionRemoveConflict - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionRemoveConflict} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - a2 = db._replace(a1, { a : 2 }); - db._remove(a1); // xpError(ERROR_ARANGO_CONFLICT) - db._remove(a1, {overwrite: true} ); - db._document(a1); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND) - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionRemoveConflict - -Remove a document using new signature: - - @startDocuBlockInline documentsCollectionRemoveSignature - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionRemoveSignature} - ~ db._create("example"); - db.example.insert({ _key: "11265325374", a: 1 } ); - | db.example.remove("example/11265325374", - { overwrite: true, waitForSync: false}) - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionRemoveSignature - -**Changes in 3.0 from 2.8:** - -The method now returns not only *true* but information about the removed -document(s). The options *silent* and *returnOld* are new. diff --git a/Documentation/Books/Manual/DataModeling/Documents/DocumentAddress.md b/Documentation/Books/Manual/DataModeling/Documents/DocumentAddress.md deleted file mode 100644 index fc2ae70a89c4..000000000000 --- a/Documentation/Books/Manual/DataModeling/Documents/DocumentAddress.md +++ /dev/null @@ -1,101 +0,0 @@ -Basics and Terminology -====================== - -Documents in ArangoDB are JSON objects. These objects can be nested (to -any depth) and may contain lists. Each document has a unique -[primary key](../../Appendix/Glossary.md#document-key) which -identifies it within its collection. Furthermore, each document is -uniquely identified -by its [document handle](../../Appendix/Glossary.md#document-handle) -across all collections in the same database. Different revisions of -the same document (identified by its handle) can be distinguished by their -[document revision](../../Appendix/Glossary.md#document-revision). -Any transaction only ever sees a single revision of a document. -For example: - -```js -{ - "_id" : "myusers/3456789", - "_key" : "3456789", - "_rev" : "14253647", - "firstName" : "John", - "lastName" : "Doe", - "address" : { - "street" : "Road To Nowhere 1", - "city" : "Gotham" - }, - "hobbies" : [ - {name: "swimming", howFavorite: 10}, - {name: "biking", howFavorite: 6}, - {name: "programming", howFavorite: 4} - ] -} -``` - -All documents contain special attributes: the -[document handle](../../Appendix/Glossary.md#document-handle) is stored -as a string in `_id`, the -[document's primary key](../../Appendix/Glossary.md#document-key) in -`_key` and the -[document revision](../../Appendix/Glossary.md#document-revision) in -`_rev`. The value of the `_key` attribute can be specified by the user when -creating a document. `_id` and `_key` values are immutable once the document -has been created. The `_rev` value is maintained by ArangoDB automatically. 
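To see these system attributes in practice, here is a small arangosh sketch
(the collection name and key are made up for this example, and the `_rev`
values shown by your server will differ):

```js
// Hypothetical collection for illustration.
db._create("myusers");

// The insert result contains the document's system attributes.
var meta = db.myusers.insert({ _key: "3456789", firstName: "John" });
meta._id;   // "myusers/3456789" - the document handle
meta._key;  // "3456789"         - the primary key
meta._rev;  // server-generated revision string

// Replacing the document changes _rev, but never _id or _key.
var updated = db.myusers.replace(meta, { firstName: "Johnny" });
updated._rev !== meta._rev; // true

db._drop("myusers");
```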
- - -Document Handle ---------------- - -A document handle uniquely identifies a document in the database. It -is a string and consists of the collection's name and the document key -(`_key` attribute) separated by `/`. - - -Document Key ------------- - -A document key uniquely identifies a document in the collection it is -stored in. It can and should be used by clients when specific documents -are queried. The document key is stored in the `_key` attribute of -each document. The key values are automatically indexed by ArangoDB in -a collection's primary index. Thus looking up a document by its -key is a fast operation. The _key value of a document is -immutable once the document has been created. By default, ArangoDB will -auto-generate a document key if no _key attribute is specified, and use -the user-specified _key otherwise. The generated _key is guaranteed to -be unique in the collection it was generated for. This also applies to -sharded collections in a cluster. It can't be guaranteed that the _key is -unique within a database or across a whole node or instance however. - -This behavior can be changed on a per-collection level by creating -collections with the `keyOptions` attribute. - -Using `keyOptions` it is possible to disallow user-specified keys -completely, or to force a specific regime for auto-generating the `_key` -values. - - -Document Revision ------------------ - -@startDocuBlock documentRevision - -Multiple Documents in a single Command --------------------------------------- - -Beginning with ArangoDB 3.0 the basic document API has been extended -to handle not only single documents but multiple documents in a single -command. This is crucial for performance, in particular in the cluster -situation, in which a single request can involve multiple network hops -within the cluster. Another advantage is that it reduces the overhead of -individual network round trips between the client -and the server. The general idea to perform multiple document operations -in a single command is to use JSON arrays of objects in the place of a -single document. As a consequence, document keys, handles and revisions -for preconditions have to be supplied embedded in the individual documents -given. Multiple document operations are restricted to a single document -or edge collection. -See the [API descriptions for collection objects](DocumentMethods.md) -for details. Note that the [API for database objects](DatabaseMethods.md) -do not offer these operations. - diff --git a/Documentation/Books/Manual/DataModeling/Documents/DocumentMethods.md b/Documentation/Books/Manual/DataModeling/Documents/DocumentMethods.md deleted file mode 100644 index 1743efd8a12f..000000000000 --- a/Documentation/Books/Manual/DataModeling/Documents/DocumentMethods.md +++ /dev/null @@ -1,1369 +0,0 @@ -Collection Methods -================== - -All ---- - - - - -`collection.all()` - -Fetches all documents from a collection and returns a cursor. You can use -*toArray*, *next*, or *hasNext* to access the result. The result -can be limited using the *skip* and *limit* operator. 
- - -**Examples** - - -Use *toArray* to get all documents at once: - - @startDocuBlockInline 001_collectionAll - @EXAMPLE_ARANGOSH_OUTPUT{001_collectionAll} - ~ db._create("five"); - db.five.insert({ name : "one" }); - db.five.insert({ name : "two" }); - db.five.insert({ name : "three" }); - db.five.insert({ name : "four" }); - db.five.insert({ name : "five" }); - db.five.all().toArray(); - ~ db._drop("five"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 001_collectionAll - -Use *limit* to restrict the documents: - - @startDocuBlockInline 002_collectionAllNext - @EXAMPLE_ARANGOSH_OUTPUT{002_collectionAllNext} - ~ db._create("five"); - db.five.insert({ name : "one" }); - db.five.insert({ name : "two" }); - db.five.insert({ name : "three" }); - db.five.insert({ name : "four" }); - db.five.insert({ name : "five" }); - db.five.all().limit(2).toArray(); - ~ db._drop("five"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 002_collectionAllNext - - - -Query by example ----------------- - - - - -`collection.byExample(example)` - -Fetches all documents from a collection that match the specified -example and returns a cursor. - -You can use *toArray*, *next*, or *hasNext* to access the -result. The result can be limited using the *skip* and *limit* -operator. - -An attribute name of the form *a.b* is interpreted as attribute path, -not as attribute. If you use - -```json -{ "a" : { "c" : 1 } } -``` - -as example, then you will find all documents, such that the attribute -*a* contains a document of the form *{c : 1 }*. For example the document - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` - -will match, but the document - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` - -will not. - -However, if you use - -```json -{ "a.c" : 1 } -``` - -then you will find all documents, which contain a sub-document in *a* -that has an attribute *c* of value *1*. Both the following documents - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` - -and - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` - -will match. - -``` -collection.byExample(path1, value1, ...) -``` - -As alternative you can supply an array of paths and values. - -**Examples** - -Use *toArray* to get all documents at once: - - @startDocuBlockInline 003_collectionByExample - @EXAMPLE_ARANGOSH_OUTPUT{003_collectionByExample} - ~ db._create("users"); - db.users.insert({ name: "Gerhard" }); - db.users.insert({ name: "Helmut" }); - db.users.insert({ name: "Angela" }); - db.users.all().toArray(); - db.users.byExample({ "_id" : "users/20" }).toArray(); - db.users.byExample({ "name" : "Gerhard" }).toArray(); - db.users.byExample({ "name" : "Helmut", "_id" : "users/15" }).toArray(); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 003_collectionByExample - - -Use *next* to loop over all documents: - - @startDocuBlockInline 004_collectionByExampleNext - @EXAMPLE_ARANGOSH_OUTPUT{004_collectionByExampleNext} - ~ db._create("users"); - db.users.insert({ name: "Gerhard" }); - db.users.insert({ name: "Helmut" }); - db.users.insert({ name: "Angela" }); - var a = db.users.byExample( {"name" : "Angela" } ); - while (a.hasNext()) print(a.next()); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 004_collectionByExampleNext - -First Example -------------- - - - - -`collection.firstExample(example)` - -Returns some document of a collection that matches the specified -example. If no such document exists, *null* will be returned. -The example has to be specified as paths and values. -See *byExample* for details. 
- -`collection.firstExample(path1, value1, ...)` - -As alternative you can supply an array of paths and values. - - -**Examples** - - - @startDocuBlockInline collectionFirstExample - @EXAMPLE_ARANGOSH_OUTPUT{collectionFirstExample} - ~ db._create("users"); - ~ db.users.insert({ name: "Gerhard" }); - ~ db.users.insert({ name: "Helmut" }); - ~ db.users.insert({ name: "Angela" }); - db.users.firstExample("name", "Angela"); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionFirstExample - - - -Range ------ - - - - -`collection.range(attribute, left, right)` - -Returns all documents from a collection such that the *attribute* is -greater or equal than *left* and strictly less than *right*. - -You can use *toArray*, *next*, or *hasNext* to access the -result. The result can be limited using the *skip* and *limit* -operator. - -An attribute name of the form *a.b* is interpreted as attribute path, -not as attribute. - -Note: the *range* simple query function is **deprecated** as of ArangoDB 2.6. -The function may be removed in future versions of ArangoDB. The preferred -way for retrieving documents from a collection within a specific range -is to use an AQL query as follows: - -```js -FOR doc IN @@collection - FILTER doc.value >= @left && doc.value < @right - LIMIT @skip, @limit - RETURN doc -``` - -**Examples** - -Use *toArray* to get all documents at once: - - @startDocuBlockInline 005_collectionRange - @EXAMPLE_ARANGOSH_OUTPUT{005_collectionRange} - ~ db._create("old"); - db.old.ensureIndex({ type: "skiplist", fields: [ "age" ] }); - db.old.insert({ age: 15 }); - db.old.insert({ age: 25 }); - db.old.insert({ age: 30 }); - db.old.range("age", 10, 30).toArray(); - ~ db._drop("old") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 005_collectionRange - - -Closed range ------------- - - - - -`collection.closedRange(attribute, left, right)` - -Returns all documents of a collection such that the *attribute* is -greater or equal than *left* and less or equal than *right*. - -You can use *toArray*, *next*, or *hasNext* to access the -result. The result can be limited using the *skip* and *limit* -operator. - -An attribute name of the form *a.b* is interpreted as attribute path, -not as attribute. - -Note: the *closedRange* simple query function is **deprecated** as of ArangoDB 2.6. -The function may be removed in future versions of ArangoDB. The preferred -way for retrieving documents from a collection within a specific range -is to use an AQL query as follows: - -```js -FOR doc IN @@collection - FILTER doc.value >= @left && doc.value <= @right - LIMIT @skip, @limit - RETURN doc -``` - -**Examples** - -Use *toArray* to get all documents at once: - - @startDocuBlockInline 006_collectionClosedRange - @EXAMPLE_ARANGOSH_OUTPUT{006_collectionClosedRange} - ~ db._create("old"); - db.old.ensureIndex({ type: "skiplist", fields: [ "age" ] }); - db.old.insert({ age: 15 }); - db.old.insert({ age: 25 }); - db.old.insert({ age: 30 }); - db.old.closedRange("age", 10, 30).toArray(); - ~ db._drop("old") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 006_collectionClosedRange - - -Any ---- - - - - -`collection.any()` - -Returns a random document from the collection or *null* if none exists. - -**Note**: this method is expensive when using the RocksDB storage engine. - - - -Count ------ - - - - -`collection.count()` - -Returns the number of living documents in the collection. 
- - -**Examples** - - - @startDocuBlockInline collectionCount - @EXAMPLE_ARANGOSH_OUTPUT{collectionCount} - ~ db._create("users"); - db.users.count(); - ~ db._drop("users"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionCount - - - -toArray -------- - - - - -`collection.toArray()` - -Converts the collection into an array of documents. Never use this call -in a production environment as it will basically create a copy of your -collection in RAM which will use resources depending on the number and size -of the documents in your collecion. - - -Document --------- - - - - -`collection.document(object)` - -The *document* method finds a document given an object *object* -containing the *_id* or *_key* attribute. The method returns -the document if it can be found. If both attributes are given, -the *_id* takes precedence, it is an error, if the collection part -of the *_id* does not match the *collection*. - -An error is thrown if *_rev* is specified but the document found has a -different revision already. An error is also thrown if no document exists -with the given *_id* or *_key* value. - -Please note that if the method is executed on the arangod server (e.g. from -inside a Foxx application), an immutable document object will be returned -for performance reasons. It is not possible to change attributes of this -immutable object. To update or patch the returned document, it needs to be -cloned/copied into a regular JavaScript object first. This is not necessary -if the *document* method is called from out of arangosh or from any other -client. - -`collection.document(document-handle)` - -As before. Instead of *object* a *document-handle* can be passed as -first argument. No revision can be specified in this case. - -`collection.document(document-key)` - -As before. Instead of *object* a *document-key* can be passed as -first argument. - -`collection.document(array)` - -This variant allows to perform the operation on a whole array of arguments. -The behavior is exactly as if *document* would have been called on all members -of the array separately and all results are returned in an array. If an error -occurs with any of the documents, no exception is risen! Instead of a document -an error object is returned in the result array. 
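The revision precondition described above can be illustrated with a small sketch (collection, key and data are made up; the `_rev` value is simply taken from the preceding insert):

```js
db._create("example");
var meta = db.example.insert({ _key: "2873916", value: 1 });
// succeeds because the given revision matches the stored one
db.example.document({ _key: "2873916", _rev: meta._rev });
// passing an outdated _rev instead would raise a revision-mismatch error
db._drop("example");
```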
- -*Examples* - -Returns the document for a document-handle: - - @startDocuBlockInline documentsCollectionNameValidPlain - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidPlain} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "2873916"}); - db.example.document("example/2873916"); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameValidPlain - -Returns the document for a document-key: - - @startDocuBlockInline documentsCollectionNameValidByKey - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByKey} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "2873916"}); - db.example.document("2873916"); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameValidByKey - -Returns the document for an object: - - @startDocuBlockInline documentsCollectionNameValidByObject - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByObject} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "2873916"}); - db.example.document({_id: "example/2873916"}); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameValidByObject - -Returns the document for an array of two keys: - - @startDocuBlockInline documentsCollectionNameValidMulti - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidMulti} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "2873916"}); - ~ var myid = db.example.insert({_key: "2873917"}); - db.example.document(["2873916","2873917"]); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameValidMulti - -An error is raised if the document is unknown: - - @startDocuBlockInline documentsCollectionNameUnknown - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameUnknown} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "2873916"}); - db.example.document("example/4472917"); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND) - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameUnknown - -An error is raised if the handle is invalid: - - @startDocuBlockInline documentsCollectionNameHandle - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameHandle} - ~ db._create("example"); - db.example.document(""); // xpError(ERROR_ARANGO_DOCUMENT_HANDLE_BAD) - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionNameHandle - -**Changes in 3.0 from 2.8:** - -*document* can now query multiple documents with one call. - - -Exists ------- - - - - -checks whether a document exists -`collection.exists(object)` - -The *exists* method determines whether a document exists given an object -`object` containing the *_id* or *_key* attribute. If both attributes -are given, the *_id* takes precedence, it is an error, if the collection -part of the *_id* does not match the *collection*. - -An error is thrown if *_rev* is specified but the document found has a -different revision already. - -Instead of returning the found document or an error, this method will -only return an object with the attributes *_id*, *_key* and *_rev*, or -*false* if no document with the given *_id* or *_key* exists. It can -thus be used for easy existence checks. - -This method will throw an error if used improperly, e.g. when called -with a non-document handle, a non-document, or when a cross-collection -request is performed. - -`collection.exists(document-handle)` - -As before. 
Instead of *object* a *document-handle* can be passed as -first argument. - -`collection.exists(document-key)` - -As before. Instead of *object* a *document-key* can be passed as -first argument. - -`collection.exists(array)` - -This variant allows to perform the operation on a whole array of arguments. -The behavior is exactly as if *exists* would have been called on all -members of the array separately and all results are returned in an array. If an error -occurs with any of the documents, the operation stops immediately returning -only an error object. - -**Changes in 3.0 from 2.8:** - -In the case of a revision mismatch *exists* now throws an error instead -of simply returning *false*. This is to make it possible to tell the -difference between a revision mismatch and a non-existing document. - -*exists* can now query multiple documents with one call. - - -Lookup By Keys --------------- - - - - -`collection.documents(keys)` - -Looks up the documents in the specified collection using the array of -keys provided. All documents for which a matching key was specified in -the *keys* array and that exist in the collection will be returned. Keys -for which no document can be found in the underlying collection are -ignored, and no exception will be thrown for them. - -This method is deprecated in favour of the array variant of *document*. - -**Examples** - - - @startDocuBlockInline collectionLookupByKeys - @EXAMPLE_ARANGOSH_OUTPUT{collectionLookupByKeys} - ~ db._drop("example"); - ~ db._create("example"); - keys = [ ]; - | for (var i = 0; i < 10; ++i) { - | db.example.insert({ _key: "test" + i, value: i }); - | keys.push("test" + i); - } - db.example.documents(keys); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionLookupByKeys - - -Insert / Save -------------- - - - -Note: since ArangoDB 2.2, _insert_ is an alias for _save_. - -`collection.insert(data)`
-`collection.save(data)` - -Creates a new document in the *collection* from the given *data*. The -*data* must be an object. The attributes *_id* and *_rev* are ignored -and are automatically generated. A unique value for the attribute *_key* -will be automatically generated if not specified. If specified, there -must not be a document with the given *_key* in the collection. - -The method returns a document with the attributes *_id*, *_key* and -*_rev*. The attribute *_id* contains the document handle of the newly -created document, the attribute *_key* the document key and the -attribute *_rev* contains the document revision. - -`collection.insert(data, options)`
-`collection.save(data, options)` - -Creates a new document in the *collection* from the given *data* as -above. The optional *options* parameter must be an object and can be -used to specify the following options: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *silent*: If this flag is set to *true*, the method does not return - any output. - - *overwrite*: If set to *true*, the insert becomes a replace-insert. - If a document with the same *_key* already exists the new document - is not rejected with unique constraint violated but will replace - the old document. - - *returnNew*: If this flag is set to *true*, the complete new document - is returned in the output under the attribute *new*. - - *returnOld*: If this flag is set to *true*, the complete old document - is returned in the output under the attribute *old*. Only available - in combination with the *overwrite* option - -`collection.insert(array)` - -`collection.insert(array, options)` - -These two variants allow to perform the operation on a whole array of -arguments. The behavior is exactly as if *insert* would have been called on all -members of the array separately and all results are returned in an array. If an -error occurs with any of the documents, no exception is risen! Instead of a -document an error object is returned in the result array. The options behave -exactly as before. - -**Changes in 3.0 from 2.8:** - -The options *silent* and *returnNew* are new. The method can now insert -multiple documents with one call. - - -**Examples** - - - @startDocuBlockInline documentsCollectionInsertSingle - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertSingle} - ~ db._create("example"); - db.example.insert({ Hello : "World" }); - db.example.insert({ Hello : "World" }, {waitForSync: true}); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionInsertSingle - - @startDocuBlockInline documentsCollectionInsertMulti - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertMulti} - ~ db._create("example"); - db.example.insert([{ Hello : "World" }, {Hello: "there"}]) - db.example.insert([{ Hello : "World" }, {}], {waitForSync: true}); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionInsertMulti - - @startDocuBlockInline documentsCollectionInsertSingleOverwrite - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertSingleOverwrite} - ~ db._create("example"); - db.example.insert({ _key : "666", Hello : "World" }); - db.example.insert({ _key : "666", Hello : "Universe" }, {overwrite: true, returnOld: true}); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionInsertSingleOverwrite - - -Replace -------- - - - - -`collection.replace(selector, data)` - -Replaces an existing document described by the *selector*, which must -be an object containing the *_id* or *_key* attribute. There must be -a document with that *_id* or *_key* in the current collection. 
This -document is then replaced with the *data* given as second argument. -Any attribute *_id*, *_key* or *_rev* in *data* is ignored. - -The method returns a document with the attributes *_id*, *_key*, *_rev* -and *_oldRev*. The attribute *_id* contains the document handle of the -updated document, the attribute *_rev* contains the document revision of -the updated document, the attribute *_oldRev* contains the revision of -the old (now replaced) document. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`collection.replace(selector, data, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. - - *returnNew*: If this flag is set to *true*, the complete new document - is returned in the output under the attribute *new*. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - -`collection.replace(document-handle, data)` - -`collection.replace(document-handle, data, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision precondition is tested. - -`collection.replace(document-key, data)` - -`collection.replace(document-key, data, options)` - -As before. Instead of *selector* a *document-key* can be passed as -first argument. No revision precondition is tested. - -`collection.replace(selectorarray, dataarray)` - -`collection.replace(selectorarray, dataarray, options)` - -These two variants allow to perform the operation on a whole array of -selector/data pairs. The two arrays given as *selectorarray* and *dataarray* -must have the same length. The behavior is exactly as if *replace* would have -been called on all respective members of the two arrays and all results are -returned in an array. If an error occurs with any of the documents, no -exception is risen! Instead of a document an error object is returned in the -result array. The options behave exactly as before. 
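A minimal sketch of the array variant (collection name and values are made up):

```js
db._create("example");
var docs = db.example.insert([ { a: 1 }, { a: 2 } ]);
// replace both documents in one call; results are returned per document
db.example.replace(docs, [ { a: 10 }, { a: 20 } ]);
db._drop("example");
```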
- -**Examples** - - -Create and update a document: - - @startDocuBlockInline documentsCollectionReplace1 - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionReplace1} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - a2 = db.example.replace(a1, { a : 2 }); - a3 = db.example.replace(a1, { a : 3 }); // xpError(ERROR_ARANGO_CONFLICT); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionReplace1 - -Use a document handle: - - @startDocuBlockInline documentsCollectionReplaceHandle - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionReplaceHandle} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "3903044"}); - a1 = db.example.insert({ a : 1 }); - a2 = db.example.replace("example/3903044", { a : 2 }); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollectionReplaceHandle - -**Changes in 3.0 from 2.8:** - -The options *silent*, *returnNew* and *returnOld* are new. The method -can now replace multiple documents with one call. - - -Update ------- - - - -`collection.update(selector, data)` - -Updates an existing document described by the *selector*, which must -be an object containing the *_id* or *_key* attribute. There must be -a document with that *_id* or *_key* in the current collection. This -document is then patched with the *data* given as second argument. -Any attribute *_id*, *_key* or *_rev* in *data* is ignored. - -The method returns a document with the attributes *_id*, *_key*, *_rev* -and *_oldRev*. The attribute *_id* contains the document handle of the -updated document, the attribute *_rev* contains the document revision of -the updated document, the attribute *_oldRev* contains the revision of -the old (now updated) document. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`collection.update(selector, data, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. - - *returnNew*: If this flag is set to *true*, the complete new document - is returned in the output under the attribute *new*. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - - *keepNull*: The optional *keepNull* parameter can be used to modify - the behavior when handling *null* values. Normally, *null* values - are stored in the database. By setting the *keepNull* parameter to - *false*, this behavior can be changed so that all attributes in - *data* with *null* values will be removed from the target document. 
- - *mergeObjects*: Controls whether objects (not arrays) will be - merged if present in both the existing and the patch document. If - set to *false*, the value in the patch document will overwrite the - existing document's value. If set to *true*, objects will be merged. - The default is *true*. - - -`collection.update(document-handle, data)` - -`collection.update(document-handle, data, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision precondition is tested. - -`collection.update(document-key, data)` - -`collection.update(document-key, data, options)` - -As before. Instead of *selector* a *document-key* can be passed as -first argument. No revision precondition is tested. - -`collection.update(selectorarray, dataarray)` - -`collection.update(selectorarray, dataarray, options)` - -These two variants allow to perform the operation on a whole array of -selector/data pairs. The two arrays given as *selectorarray* and *dataarray* -must have the same length. The behavior is exactly as if *update* would have -been called on all respective members of the two arrays and all results are -returned in an array. If an error occurs with any of the documents, no -exception is risen! Instead of a document an error object is returned in the -result array. The options behave exactly as before. - -*Examples* - -Create and update a document: - - @startDocuBlockInline documentsCollection_UpdateDocument - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollection_UpdateDocument} - ~ db._create("example"); - a1 = db.example.insert({"a" : 1}); - a2 = db.example.update(a1, {"b" : 2, "c" : 3}); - a3 = db.example.update(a1, {"d" : 4}); // xpError(ERROR_ARANGO_CONFLICT); - a4 = db.example.update(a2, {"e" : 5, "f" : 6 }); - db.example.document(a4); - a5 = db.example.update(a4, {"a" : 1, c : 9, e : 42 }); - db.example.document(a5); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollection_UpdateDocument - -Use a document handle: - - @startDocuBlockInline documentsCollection_UpdateHandleSingle - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollection_UpdateHandleSingle} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "18612115"}); - a1 = db.example.insert({"a" : 1}); - a2 = db.example.update("example/18612115", { "x" : 1, "y" : 2 }); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollection_UpdateHandleSingle - -Use the keepNull parameter to remove attributes with null values: - - @startDocuBlockInline documentsCollection_UpdateHandleKeepNull - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollection_UpdateHandleKeepNull} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "19988371"}); - db.example.insert({"a" : 1}); - |db.example.update("example/19988371", - { "b" : null, "c" : null, "d" : 3 }); - db.example.document("example/19988371"); - db.example.update("example/19988371", { "a" : null }, false, false); - db.example.document("example/19988371"); - | db.example.update("example/19988371", - { "b" : null, "c": null, "d" : null }, false, false); - db.example.document("example/19988371"); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollection_UpdateHandleKeepNull - -Patching array values: - - @startDocuBlockInline documentsCollection_UpdateHandleArray - @EXAMPLE_ARANGOSH_OUTPUT{documentsCollection_UpdateHandleArray} - ~ db._create("example"); - ~ var myid = db.example.insert({_key: "20774803"}); - | db.example.insert({"a" : { "one" : 1, "two" : 2, "three" 
: 3 }, - "b" : { }}); - | db.example.update("example/20774803", {"a" : { "four" : 4 }, - "b" : { "b1" : 1 }}); - db.example.document("example/20774803"); - | db.example.update("example/20774803", { "a" : { "one" : null }, - | "b" : null }, - false, false); - db.example.document("example/20774803"); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentsCollection_UpdateHandleArray - - -**Changes in 3.0 from 2.8:** - -The options *silent*, *returnNew* and *returnOld* are new. The method -can now update multiple documents with one call. - - -Remove ------- - - - -`collection.remove(selector)` - -Removes a document described by the *selector*, which must be an object -containing the *_id* or *_key* attribute. There must be a document with -that *_id* or *_key* in the current collection. This document is then -removed. - -The method returns a document with the attributes *_id*, *_key* and *_rev*. -The attribute *_id* contains the document handle of the -removed document, the attribute *_rev* contains the document revision of -the removed document. - -If the selector contains a *_rev* attribute, the method first checks -that the specified revision is the current revision of that document. -If not, there is a conflict, and an error is thrown. - -`collection.remove(selector, options)` - -As before, but *options* must be an object that can contain the following -boolean attributes: - - - *waitForSync*: One can force - synchronization of the document creation operation to disk even in - case that the *waitForSync* flag is been disabled for the entire - collection. Thus, the *waitForSync* option can be used to force - synchronization of just specific operations. To use this, set the - *waitForSync* parameter to *true*. If the *waitForSync* parameter - is not specified or set to *false*, then the collection's default - *waitForSync* behavior is applied. The *waitForSync* parameter - cannot be used to disable synchronization for collections that have - a default *waitForSync* value of *true*. - - *overwrite*: If this flag is set to *true*, a *_rev* attribute in - the selector is ignored. - - *returnOld*: If this flag is set to *true*, the complete previous - revision of the document is returned in the output under the - attribute *old*. - - *silent*: If this flag is set to *true*, no output is returned. - -`collection.remove(document-handle)` - -`collection.remove(document-handle, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision check is performed. - -`collection.remove(document-key)` - -`collection.remove(document-handle, options)` - -As before. Instead of *selector* a *document-handle* can be passed as -first argument. No revision check is performed. - -`collection.remove(selectorarray)` - -`collection.remove(selectorarray,options)` - -These two variants allow to perform the operation on a whole array of -selectors. The behavior is exactly as if *remove* would have been called on all -members of the array separately and all results are returned in an array. If an -error occurs with any of the documents, no exception is risen! Instead of a -document an error object is returned in the result array. The options behave -exactly as before. 
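A minimal sketch of the array variant (collection name and data are made up):

```js
db._create("example");
var docs = db.example.insert([ { a: 1 }, { a: 2 }, { a: 3 } ]);
// remove all three documents in one call; per-document results are returned
db.example.remove(docs);
db._drop("example");
```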
- -**Examples** - - -Remove a document: - - @startDocuBlockInline documentDocumentRemoveSimple - @EXAMPLE_ARANGOSH_OUTPUT{documentDocumentRemoveSimple} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - db.example.document(a1); - db.example.remove(a1); - db.example.document(a1); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentDocumentRemoveSimple - -Remove a document with a conflict: - - @startDocuBlockInline documentDocumentRemoveConflict - @EXAMPLE_ARANGOSH_OUTPUT{documentDocumentRemoveConflict} - ~ db._create("example"); - a1 = db.example.insert({ a : 1 }); - a2 = db.example.replace(a1, { a : 2 }); - db.example.remove(a1); // xpError(ERROR_ARANGO_CONFLICT); - db.example.remove(a1, true); - db.example.document(a1); // xpError(ERROR_ARANGO_DOCUMENT_NOT_FOUND); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock documentDocumentRemoveConflict - -**Changes in 3.0 from 2.8:** - -The method now returns not only *true* but information about the removed -document(s). The options *silent* and *returnOld* are new. The method -can now remove multiple documents with one call. - - - -Remove By Keys --------------- - - - - -`collection.removeByKeys(keys)` - - -Looks up the documents in the specified collection using the array of keys -provided, and removes all documents from the collection whose keys are -contained in the *keys* array. Keys for which no document can be found in -the underlying collection are ignored, and no exception will be thrown for -them. - -The method will return an object containing the number of removed documents -in the *removed* sub-attribute, and the number of not-removed/ignored -documents in the *ignored* sub-attribute. - -This method is deprecated in favour of the array variant of *remove*. - -**Examples** - - - @startDocuBlockInline collectionRemoveByKeys - @EXAMPLE_ARANGOSH_OUTPUT{collectionRemoveByKeys} - ~ db._drop("example"); - ~ db._create("example"); - keys = [ ]; - | for (var i = 0; i < 10; ++i) { - | db.example.insert({ _key: "test" + i, value: i }); - | keys.push("test" + i); - } - db.example.removeByKeys(keys); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionRemoveByKeys - - -Remove By Example ------------------ - - - - -`collection.removeByExample(example)` - -Removes all documents matching an example. - -`collection.removeByExample(document, waitForSync)` - -The optional *waitForSync* parameter can be used to force synchronization -of the document deletion operation to disk even in case that the -*waitForSync* flag had been disabled for the entire collection. Thus, -the *waitForSync* parameter can be used to force synchronization of just -specific operations. To use this, set the *waitForSync* parameter to -*true*. If the *waitForSync* parameter is not specified or set to -*false*, then the collection's default *waitForSync* behavior is -applied. The *waitForSync* parameter cannot be used to disable -synchronization for collections that have a default *waitForSync* value -of *true*. - -`collection.removeByExample(document, waitForSync, limit)` - -The optional *limit* parameter can be used to restrict the number of -removals to the specified value. If *limit* is specified but less than the -number of documents in the collection, it is undefined which documents are -removed. 
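As a sketch of the *limit* variant (collection name, data and the limit value are made up):

```js
db._create("logs");
db.logs.insert([ { level: "debug" }, { level: "debug" }, { level: "info" } ]);
// remove at most one of the documents matching the example
db.logs.removeByExample({ level: "debug" }, false, 1);
db._drop("logs");
```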
- - -**Examples** - - - @startDocuBlockInline 010_documentsCollectionRemoveByExample - @EXAMPLE_ARANGOSH_OUTPUT{010_documentsCollectionRemoveByExample} - ~ db._create("example"); - ~ db.example.insert({ Hello : "world" }); - db.example.removeByExample( {Hello : "world"} ); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 010_documentsCollectionRemoveByExample - - -Replace By Example ------------------- - - - - -`collection.replaceByExample(example, newValue)` - -Replaces all documents matching an example with a new document body. -The entire document body of each document matching the *example* will be -replaced with *newValue*. The document meta-attributes *_id*, *_key* and -*_rev* will not be replaced. - -`collection.replaceByExample(document, newValue, waitForSync)` - -The optional *waitForSync* parameter can be used to force synchronization -of the document replacement operation to disk even in case that the -*waitForSync* flag had been disabled for the entire collection. Thus, -the *waitForSync* parameter can be used to force synchronization of just -specific operations. To use this, set the *waitForSync* parameter to -*true*. If the *waitForSync* parameter is not specified or set to -*false*, then the collection's default *waitForSync* behavior is -applied. The *waitForSync* parameter cannot be used to disable -synchronization for collections that have a default *waitForSync* value -of *true*. - -`collection.replaceByExample(document, newValue, waitForSync, limit)` - -The optional *limit* parameter can be used to restrict the number of -replacements to the specified value. If *limit* is specified but less than -the number of documents in the collection, it is undefined which documents are -replaced. - - -**Examples** - - - @startDocuBlockInline 011_documentsCollectionReplaceByExample - @EXAMPLE_ARANGOSH_OUTPUT{011_documentsCollectionReplaceByExample} - ~ db._create("example"); - db.example.insert({ Hello : "world" }); - db.example.replaceByExample({ Hello: "world" }, {Hello: "mars"}, false, 5); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 011_documentsCollectionReplaceByExample - - -Update By Example ------------------ - - - - -`collection.updateByExample(example, newValue)` - -Partially updates all documents matching an example with a new document body. -Specific attributes in the document body of each document matching the -*example* will be updated with the values from *newValue*. -The document meta-attributes *_id*, *_key* and *_rev* cannot be updated. - -Partial update could also be used to append new fields, -if there were no old field with same name. - -`collection.updateByExample(document, newValue, keepNull, waitForSync)` - -The optional *keepNull* parameter can be used to modify the behavior when -handling *null* values. Normally, *null* values are stored in the -database. By setting the *keepNull* parameter to *false*, this behavior -can be changed so that all attributes in *data* with *null* values will -be removed from the target document. - -The optional *waitForSync* parameter can be used to force synchronization -of the document replacement operation to disk even in case that the -*waitForSync* flag had been disabled for the entire collection. Thus, -the *waitForSync* parameter can be used to force synchronization of just -specific operations. To use this, set the *waitForSync* parameter to -*true*. 
If the *waitForSync* parameter is not specified or set to -*false*, then the collection's default *waitForSync* behavior is -applied. The *waitForSync* parameter cannot be used to disable -synchronization for collections that have a default *waitForSync* value -of *true*. - -`collection.updateByExample(document, newValue, keepNull, waitForSync, limit)` - -The optional *limit* parameter can be used to restrict the number of -updates to the specified value. If *limit* is specified but less than -the number of documents in the collection, it is undefined which documents are -updated. - -`collection.updateByExample(document, newValue, options)` - -Using this variant, the options for the operation can be passed using -an object with the following sub-attributes: - - - *keepNull* - - *waitForSync* - - *limit* - - *mergeObjects* - - -**Examples** - - - @startDocuBlockInline 012_documentsCollectionUpdateByExample - @EXAMPLE_ARANGOSH_OUTPUT{012_documentsCollectionUpdateByExample} - ~ db._create("example"); - db.example.insert({ Hello : "world", foo : "bar" }); - db.example.updateByExample({ Hello: "world" }, { Hello: "foo", World: "bar" }, false); - db.example.byExample({ Hello: "foo" }).toArray() - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 012_documentsCollectionUpdateByExample - - -Collection type ---------------- - -`collection.type()` - -Returns the type of a collection. Possible values are: -- 2: document collection -- 3: edge collection - - -Convert a document key to a document id ---------------------------------------- - - - -`collection.documentId(documentKey)` - -Qualifies the given document key with this collection's name to derive a -valid document id. - -Throws if the document key is invalid. Note that this method does not -check whether the document already exists in this collection. - - -Get the Version of ArangoDB ---------------------------- - -`db._version()` - -Returns the server version string. Note that this is not the version of the -database. - - -**Examples** - - - @startDocuBlockInline dbVersion - @EXAMPLE_ARANGOSH_OUTPUT{dbVersion} - require("@arangodb").db._version(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock dbVersion - -Edges ------ - -Edges are normal documents that always contain a `_from` and a `_to` -attribute. Therefore, you can use the document methods to operate on -edges. The following methods, however, are specific to edges. - -`edge-collection.edges(vertex)` - -The *edges* operator finds all edges starting from (outbound) or ending -in (inbound) *vertex*. - -`edge-collection.edges(vertices)` - -The *edges* operator finds all edges starting from (outbound) or ending -in (inbound) a document from *vertices*, which must be a list of documents -or document handles. - - @startDocuBlockInline EDGCOL_02_Relation - @EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_Relation} - db._create("vertex"); - db._createEdgeCollection("relation"); - var myGraph = {}; - myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); - myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); - | myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, - { label : "knows"}); - db._document(myGraph.e1); - db.relation.edges(myGraph.e1._id); - ~ db._drop("relation"); - ~ db._drop("vertex"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock EDGCOL_02_Relation - -`edge-collection.inEdges(vertex)` - -The *edges* operator finds all edges ending in (inbound) *vertex*. 
- -`edge-collection.inEdges(vertices)` - -The *edges* operator finds all edges ending in (inbound) a document from -*vertices*, which must a list of documents or document handles. - -**Examples** - - @startDocuBlockInline EDGCOL_02_inEdges - @EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_inEdges} - db._create("vertex"); - db._createEdgeCollection("relation"); - ~ var myGraph = {}; - myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); - myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); - | myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, - { label : "knows"}); - db._document(myGraph.e1); - db.relation.inEdges(myGraph.v1._id); - db.relation.inEdges(myGraph.v2._id); - ~ db._drop("relation"); - ~ db._drop("vertex"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock EDGCOL_02_inEdges - -`edge-collection.outEdges(vertex)` - -The *edges* operator finds all edges starting from (outbound) -*vertices*. - -`edge-collection.outEdges(vertices)` - -The *edges* operator finds all edges starting from (outbound) a document -from *vertices*, which must a list of documents or document handles. - - -**Examples** - - @startDocuBlockInline EDGCOL_02_outEdges - @EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_outEdges} - db._create("vertex"); - db._createEdgeCollection("relation"); - ~ var myGraph = {}; - myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); - myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); - | myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, - { label : "knows"}); - db._document(myGraph.e1); - db.relation.outEdges(myGraph.v1._id); - db.relation.outEdges(myGraph.v2._id); - ~ db._drop("relation"); - ~ db._drop("vertex"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock EDGCOL_02_outEdges - -Misc ----- - -`collection.iterate(iterator, options)` - -Iterates over some elements of the collection and apply the function -*iterator* to the elements. The function will be called with the -document as first argument and the current number (starting with 0) -as second argument. - -*options* must be an object with the following attributes: - - - *limit* (optional, default none): use at most *limit* documents. - - - *probability* (optional, default all): a number between *0* and - *1*. Documents are chosen with this probability. - -**Examples** - - @startDocuBlockInline accessViaGeoIndex - @EXAMPLE_ARANGOSH_OUTPUT{accessViaGeoIndex} - ~db._create("example") - |for (i = -90; i <= 90; i += 10) { - | for (j = -180; j <= 180; j += 10) { - | db.example.insert({ name : "Name/" + i + "/" + j, - | home : [ i, j ], - | work : [ -i, -j ] }); - | } - |} - - db.example.ensureIndex({ type: "geo", fields: [ "home" ] }); - |items = db.example.getIndexes().map(function(x) { return x.id; }); - db.example.index(items[1]); - ~ db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock accessViaGeoIndex diff --git a/Documentation/Books/Manual/DataModeling/Documents/README.md b/Documentation/Books/Manual/DataModeling/Documents/README.md deleted file mode 100644 index f04adc61a0d8..000000000000 --- a/Documentation/Books/Manual/DataModeling/Documents/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Documents -========= - -This is an introduction to ArangoDB's interface for working with -documents from the JavaScript shell *arangosh* or in JavaScript code in -the server. For other languages see the corresponding language API. 
- -- [Basics and Terminology](DocumentAddress.md): section on the basic approach -- [Collection Methods](DocumentMethods.md): detailed API description for collection objects -- [Database Methods](DatabaseMethods.md): detailed API description for database objects diff --git a/Documentation/Books/Manual/DataModeling/GraphsVerticesEdges.md b/Documentation/Books/Manual/DataModeling/GraphsVerticesEdges.md deleted file mode 100644 index 45c19fa73ab3..000000000000 --- a/Documentation/Books/Manual/DataModeling/GraphsVerticesEdges.md +++ /dev/null @@ -1,10 +0,0 @@ -Graphs, Vertices & Edges -======================== - -Graphs, vertices & edges are defined in the [Graphs](../Graphs/README.md) chapter in details. - -Related blog posts and learning material: - -- [Graphs in data modeling - is the emperor naked?](https://medium.com/@neunhoef/graphs-in-data-modeling-is-the-emperor-naked-2e65e2744413#.x0a5z66ji) -- [Index Free Adjacency or Hybrid Indexes for Graph Databases](https://www.arangodb.com/2016/04/index-free-adjacency-hybrid-indexes-graph-databases/) -- [ArangoDB Performance Course](https://www.arangodb.com/arangodb-performance-course/) diff --git a/Documentation/Books/Manual/DataModeling/NamingConventions/AttributeNames.md b/Documentation/Books/Manual/DataModeling/NamingConventions/AttributeNames.md deleted file mode 100644 index 8d89c5bb774a..000000000000 --- a/Documentation/Books/Manual/DataModeling/NamingConventions/AttributeNames.md +++ /dev/null @@ -1,41 +0,0 @@ -Attribute Names -=============== - -Users can pick attribute names for document attributes as desired, provided the -following attribute naming constraints are not violated: - -- Attribute names starting with an underscore are considered to be system - attributes for ArangoDB's internal use. Such attribute names are already used - by ArangoDB for special purposes: - - *_id* is used to contain a document's handle - - *_key* is used to contain a document's user-defined key - - *_rev* is used to contain the document's revision number - - In edge collections, the - - *_from* - - *_to* - - attributes are used to reference other documents. - - More system attributes may be added in the future without further notice so - end users should try to avoid using their own attribute names starting with - underscores. - -* Theoretically, attribute names can include punctuation and special characters - as desired, provided the name is a valid UTF-8 string. For maximum - portability, special characters should be avoided though. For example, - attribute names may contain the dot symbol, but the dot has a special meaning - in JavaScript and also in AQL, so when using such attribute names in one of - these languages, the attribute name needs to be quoted by the end user. - Overall it might be better to use attribute names which don't require any - quoting/escaping in all languages used. This includes languages used by the - client (e.g. Ruby, PHP) if the attributes are mapped to object members there. -* Attribute names starting with an at-mark (*@*) will need to be enclosed in - backticks when used in an AQL query to tell them apart from bind variables. - Therefore we do not encourage the use of attributes starting with at-marks, - though they will work when used properly. -* ArangoDB does not enforce a length limit for attribute names. However, long - attribute names may use more memory in result sets etc. Therefore the use - of long attribute names is discouraged. -* Attribute names are case-sensitive. 
-* Attributes with empty names (an empty string) are disallowed. - diff --git a/Documentation/Books/Manual/DataModeling/NamingConventions/CollectionAndViewNames.md b/Documentation/Books/Manual/DataModeling/NamingConventions/CollectionAndViewNames.md deleted file mode 100644 index 5d9599076f6d..000000000000 --- a/Documentation/Books/Manual/DataModeling/NamingConventions/CollectionAndViewNames.md +++ /dev/null @@ -1,16 +0,0 @@ -Collection and View Names -========================= - -Users can pick names for their collections (or views) as desired, provided the -following naming constraints are not violated: - -* Collection names must only consist of the letters *a* to *z* (both in lower - and upper case), the numbers *0* to *9*, and the underscore (*_*) or dash (*-*) - symbols. This also means that any non-ASCII collection names are not allowed -* User-defined collection names must always start with a letter. System collection - names must start with an underscore. - All collection names starting with an underscore are considered to be system - collections that are for ArangoDB's internal use only. System collection names - should not be used by end users for their own collections -* The maximum allowed length of a collection name is 64 bytes -* Collection names are case-sensitive diff --git a/Documentation/Books/Manual/DataModeling/NamingConventions/DatabaseNames.md b/Documentation/Books/Manual/DataModeling/NamingConventions/DatabaseNames.md deleted file mode 100644 index b8292b5689be..000000000000 --- a/Documentation/Books/Manual/DataModeling/NamingConventions/DatabaseNames.md +++ /dev/null @@ -1,17 +0,0 @@ -Database Names -============== - -ArangoDB will always start up with a default database, named *_system*. -Users can create additional databases in ArangoDB, provided the database -names conform to the following constraints: - -* Database names must only consist of the letters *a* to *z* (both lower and - upper case allowed), the numbers *0* to *9*, and the underscore (*_*) or - dash (*-*) symbols - This also means that any non-ASCII database names are not allowed -* Database names must always start with a letter. Database names starting - with an underscore are considered to be system databases, and users should - not create or delete those -* The maximum allowed length of a database name is 64 bytes -* Database names are case-sensitive - diff --git a/Documentation/Books/Manual/DataModeling/NamingConventions/DocumentKeys.md b/Documentation/Books/Manual/DataModeling/NamingConventions/DocumentKeys.md deleted file mode 100644 index c746f5150dc2..000000000000 --- a/Documentation/Books/Manual/DataModeling/NamingConventions/DocumentKeys.md +++ /dev/null @@ -1,37 +0,0 @@ -Document Keys -============= - -Users can define their own keys for documents they save. The document key will -be saved along with a document in the *_key* attribute. Users can pick key -values as required, provided that the values conform to the following -restrictions: - -* The key must be a string value. Numeric keys are not allowed, but any numeric - value can be put into a string and can then be used as document key. -* The key must be at least 1 byte and at most 254 bytes long. 
Empty keys are - disallowed when specified (though it may be valid to completely omit the - *_key* attribute from a document) -* It must consist of the letters a-z (lower or upper case), the digits 0-9 - or any of the following punctuation characters: - `_` `-` `:` `.` `@` `(` `)` `+` `,` `=` `;` `$` `!` `*` `'` `%` -* Any other characters, especially multi-byte UTF-8 sequences, whitespace or - punctuation characters cannot be used inside key values -* The key must be unique within the collection it is used - -Keys are case-sensitive, i.e. *myKey* and *MyKEY* are considered to be -different keys. - -Specifying a document key is optional when creating new documents. If no -document key is specified by the user, ArangoDB will create the document key -itself as each document is required to have a key. - -There are no guarantees about the format and pattern of auto-generated document -keys other than the above restrictions. Clients should therefore treat -auto-generated document keys as opaque values and not rely on their format. - -The current format for generated keys is a string containing numeric digits. -The numeric values reflect chronological time in the sense that _key values -generated later will contain higher numbers than _key values generated earlier. -But the exact value that will be generated by the server is not predictable. -Note that if you sort on the _key attribute, string comparison will be used, -which means `"100"` is less than `"99"` etc. diff --git a/Documentation/Books/Manual/DataModeling/NamingConventions/README.md b/Documentation/Books/Manual/DataModeling/NamingConventions/README.md deleted file mode 100644 index 4d4a58a4944e..000000000000 --- a/Documentation/Books/Manual/DataModeling/NamingConventions/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Naming Conventions in ArangoDB -============================== - -The following naming conventions should be followed by users when creating -databases, collections and documents in ArangoDB. - diff --git a/Documentation/Books/Manual/DataModeling/OperationalFactors.md b/Documentation/Books/Manual/DataModeling/OperationalFactors.md deleted file mode 100644 index d47907173e9a..000000000000 --- a/Documentation/Books/Manual/DataModeling/OperationalFactors.md +++ /dev/null @@ -1,297 +0,0 @@ -Data Modeling and Operational Factors -===================================== - -Designing the data model of your application is a crucial task that can make or -break the performance of your application. A well-designed data model will -allow you to write efficient AQL queries, increase throughput of CRUD operations -and will make sure your data is distributed in the most effective way. - -Whether you design a new application with ArangoDB or port an existing one to -use ArangoDB, you should always analyze the (expected) data access patterns of -your application in conjunction with several factors: - -Operation Atomicity -------------------- - -All insert / update / replace / remove operations in ArangoDB are atomic on a -_single_ document. Using a single instance of ArangoDB, multi-document / -multi-collection queries are guaranteed to be fully ACID, however in -cluster mode only single-document operations are also fully ACID. This has -implications if you try to ensure consistency across multiple operations. - -### Denormalizing Data - -In traditional _SQL_ databases it is considered a good practice to normalize -all your data across multiple tables to avoid duplicated data and ensure -consistency. 
ArangoDB is a schema-less _NoSQL_ multi-model database, so a good data model
is not necessarily normalized. On the contrary, to avoid extra joins it is
often an advantage to deliberately _denormalize_ your data model.

To denormalize your data model you essentially combine all related entities
into a single document instead of spreading it over multiple documents and
collections. The advantage of this is that it allows you to atomically update
all of your connected data; the downside is that your documents become larger
(see below for more considerations on
[large documents](#document-and-transaction-sizes)).

As a simple example, let's say you want to maintain the total amount of a
shopping basket (from an online shop) together with a list of all included
items and prices. If the total balance of all items in the shopping basket
should stay in sync with the contained items, you may put all contained items
inside the shopping basket document and only update them together:

```json
{
  "_id": "basket/123",
  "_key": "123",
  "_rev": "_Xv0TA0O--_",
  "user": "some_user",
  "balance": "100",
  "items": [ { "price": 10, "title": "Harry Potter and the Philosopher’s Stone" },
             { "price": 90, "title": "Vacuum XYZ" } ]
}
```

This allows you to avoid lookups via the document keys in
multiple collections.

### Ensuring Consistent Atomic Updates

There are ways to ensure atomicity and consistency when performing updates in
your application. ArangoDB allows you to specify the revision ID (`_rev`) value
of the existing document you want to update. The update or replace operation is
only able to succeed if the values match. This way you can ensure that if your
application has read a document with a certain `_rev` value, modifications
to it are allowed to pass _if and only if_ the document was not changed by
someone else in the meantime. By specifying a document's previous revision ID
you can avoid losing updates on these documents without noticing it.

You can specify the revision via the `_rev` field inside the document or via
the `If-Match` HTTP header in the document REST API.
In the _arangosh_ you can perform such an operation like this:

```js
db.basketCollection.update({"_key": "123", "_rev": "_Xv0TA0O--_"}, data)
// or replace
db.basketCollection.replace({"_key": "123", "_rev": "_Xv0TA0O--_"}, data)
```

An AQL query with the same effect can be written by using the _ignoreRevs_
option together with a modification operation. Either let ArangoDB compare
the `_rev` value and only succeed if they still match, or let ArangoDB
ignore them (default):

```js
FOR i IN 1..1000
  UPDATE { _key: CONCAT('test', i), _rev: "1287623" }
  WITH { foobar: true } IN users
  OPTIONS { ignoreRevs: false }
```

Indexes
-------

Indexes can improve the performance of AQL queries drastically. Queries that
frequently filter on one or more fields can be made faster by creating an index
(in arangosh via the _ensureIndex_ command, the Web UI or your specific
client driver). There is already an automatic (and non-deletable) primary index
in every collection on the `_key` and `_id` fields, as well as the edge index
on `_from` and `_to` (for edge collections).

Should you decide to create an index you should consider a few things:

- Indexes are a trade-off between storage space, maintenance cost and query speed.
- Each new index will increase the amount of RAM and (for the RocksDB storage engine)
  the amount of disk space needed.
- Indexes with [indexed array values](../Indexing/IndexBasics.md#indexing-array-values)
  need an extra index entry per array entry.
- Adding indexes increases the write amplification, i.e. it negatively affects
  the write performance (how much depends on the storage engine).
- Each index needs to add at least one index entry per document. You can use
  _sparse indexes_ to avoid adding _null_ index entries for rarely used attributes.
- Sparse indexes can be smaller than non-sparse indexes, but they can only be
  used if the optimizer determines that the _null_ value cannot be in the
  result range, e.g. by an explicit `FILTER doc.attribute != null` in AQL
  (also see [Type and value order](../../AQL/Fundamentals/TypeValueOrder.html)).
- Collections that are more frequently read benefit the most from added indexes,
  provided the indexes can actually be utilized.
- Indexes on collections with a high rate of inserts or updates compared to
  reads may hurt overall performance.

Generally it is best to design your indexes with your queries in mind.
Use the [query profiler](../../AQL/ExecutionAndPerformance/QueryProfiler.html)
to understand the bottlenecks in your queries.

Always consider the additional space requirements of extra indexes when
planning server capacities. For more information on indexes see
[Index Basics](../Indexing/IndexBasics.md).

Number of Databases and Collections
-----------------------------------

Sometimes you may consider splitting up data over multiple collections.
For example, one could create a new set of collections for each new customer
instead of having a customer field on each document. Having a few thousand
collections has no significant performance penalty for most operations and
results in good performance.

Grouping documents into collections by type (e.g. session collections
'sessions_dev' and 'sessions_prod') allows you to avoid an extra index on a _type_
field. Similarly, you may consider
[splitting edge collections](../Graphs/README.md#multiple-edge-collections-vs-filters-on-edge-document-attributes)
instead of specifying the type of the connection inside the edge document.

A few things to consider:
- Adding an extra collection always incurs a small amount of overhead for the
  collection metadata and indexes.
- You cannot use more than _2048_ collections per AQL query.
- Uniqueness constraints on certain attributes (via a unique index) can only
  be enforced by ArangoDB within one collection.
- Only with the _MMFiles storage engine_: creating extra databases will require
  two compaction and cleanup threads per database. This might lead to
  undesirable effects should you decide to create many databases compared to
  the number of available CPU cores.

Cluster Sharding
----------------

The ArangoDB cluster _partitions_ your collections into one or more _shards_
across multiple _DBServers_. This enables efficient _horizontal scaling_:
it allows you to store much more data, since ArangoDB distributes the data
automatically to the different servers. In many situations one can also reap
a benefit in data throughput, again because the load can be distributed to
multiple machines.

ArangoDB uses the specified _shard keys_ to determine in which shard a given
document is stored. Choosing the right shard key can have a significant impact on
performance: it can reduce network traffic and increase throughput.
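In arangosh, shard keys can be set at collection creation time, for instance (a hypothetical sketch; the collection name, shard count and key attribute are made up, and sharding only takes effect in a cluster):

```js
// distribute documents across 5 shards based on the customerId attribute
db._create("orders", { numberOfShards: 5, shardKeys: ["customerId"] });
```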
ArangoDB uses consistent hashing to compute the target shard from the given
values (as specified via `shardKeys`). The ideal set of shard keys allows
ArangoDB to distribute documents evenly across your shards and your _DBServers_.
By default ArangoDB uses the `_key` field as the shard key. For a custom shard
key you should consider a few different properties:

- **Cardinality**: The cardinality of a set is the number of distinct values
  that it contains. A shard key with only _N_ distinct values cannot be hashed
  onto more than _N_ shards. Consider using multiple shard keys if one of your
  attributes has a low cardinality.
- **Frequency**: Consider how often a given shard key value may appear in
  your data. Having a lot of documents with identical shard keys will lead
  to unevenly distributed data.

See [Sharding](../Architecture/DeploymentModes/Cluster/Architecture.md#sharding)
for more information.

### Smart Graphs

Smart Graphs are an Enterprise Edition feature of ArangoDB. They enable you to
manage graphs at scale and give a vast performance benefit for graphs sharded
in an ArangoDB Cluster.

To create a Smart Graph you need a smart graph attribute that partitions your
graph into several smaller sub-graphs. Ideally these sub-graphs follow a
"natural" structure in your data. Such sub-graphs have a large number of edges
that only connect vertices in the same sub-graph, and only a few edges
connecting vertices of different sub-graphs.

All the usual considerations for shard keys also apply to smart graph
attributes. For more information see
[SmartGraphs](../Graphs/SmartGraphs/README.md).

Document and Transaction Sizes
------------------------------

When designing your data model you should keep in mind that the size of
documents affects the performance and storage requirements of your system.
A very large number of very small documents may have an unexpectedly big
overhead: each document requires a certain amount of extra storage space,
depending on the storage engine and the indexes you added to the collection.
The overhead may become significant if you store a large amount of very small
documents.

Very large documents may reduce your write throughput:
this is due to the extra time needed to send larger documents over the
network as well as more copying work required inside the storage engines.

Consider some ways to minimize the required amount of storage space:

- Explicitly set the `_key` field to a custom unique value (see the sketch
  after this list). This enables you to store information in the `_key` field
  instead of another field inside the document. The `_key` value is always
  indexed, and setting a custom value means you can use a shorter value than
  what would have been generated automatically.
- Shorter field names will reduce the amount of space needed to store documents
  (this has no effect on index size). ArangoDB is schemaless and needs to store
  the document structure inside each document. Usually this is a small overhead
  compared to the overall document size.
- Combining many small related documents into one larger one can also
  reduce overhead. Common fields can be stored once and indexes just need to
  store one entry. This is only beneficial if the combined documents are
  regularly retrieved together and not just subsets.
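As a minimal sketch of the first point, the following _arangosh_ snippet
stores an externally known identifier directly in `_key`. It assumes an
`orders` collection exists; the collection and key values are made up for
illustration:

```js
// Hypothetical example: reuse the order number as the document key
// instead of duplicating it in a separate attribute.
db.orders.insert({ _key: "order-2019-000123", balance: 100 });

// The document can later be fetched directly via its key,
// without needing any additional index.
db.orders.document("order-2019-000123");
```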
RocksDB Storage Engine
----------------------

Especially with the RocksDB storage engine, large documents and transactions
may negatively impact the write performance:

- Consider a maximum size of 50-75 kB _per document_ as a good rule of thumb.
  This will allow you to maintain steady write throughput even under very high load.
- Transactions are held in memory before they are committed.
  This means that transactions have to be split if they become too big, see the
  [limitations section](../Transactions/Limitations.md#rocksdb-storage-engine).

### Improving Update Query Performance

You may use the _exclusive_ query option for modifying AQL queries to improve
their performance drastically. This has the downside that no concurrent writes
may occur on the collection, but ArangoDB is able to use a special fast path
which should improve the performance by up to 50% for large collections.

```js
FOR doc IN mycollection
  UPDATE doc._key
  WITH { foobar: true } IN mycollection
  OPTIONS { exclusive: true }
```

The same naturally also applies to queries using _REPLACE_ or _INSERT_.
Additionally, you may be able to use the `intermediateCommitCount` option in
the API to subdivide the AQL transaction into smaller batches.

### Read / Write Load Balance

Depending on whether your data model has a higher read or a higher write rate,
you may want to adjust some of the RocksDB-specific options. Some of the most
critical options for tuning performance and memory usage are listed below:

`--rocksdb.block-cache-size`

This is the size of the block cache in bytes. The cache is used for read
operations, so increasing its size may improve the performance of read-heavy
workloads. You may wish to adjust this parameter to control memory usage.

`--rocksdb.write-buffer-size`

The amount of data to build up in memory before converting it to a file on
disk. Larger values increase performance, especially during bulk loads.

`--rocksdb.max-write-buffer-number`

The maximum number of write buffers that are built up in memory, per internal
column family. The default and the minimum number is 2, so that while one
write buffer is being flushed to storage, new writes can continue to the other
write buffer.

`--rocksdb.total-write-buffer-size`

The total amount of data to build up in all in-memory buffers when writing
into ArangoDB. You may wish to adjust this parameter to control memory usage.
Setting this to a low value may limit the RAM that ArangoDB uses, but may slow
down write-heavy workloads. Setting this to 0 does not limit the size of the
write buffers.

`--rocksdb.level0-stop-trigger`

When this many files accumulate in level-0, writes are stopped to allow
compaction to catch up. Setting this value very high may improve write
throughput, but may lead to temporarily bad read performance.
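As a rough sketch of how these options are applied, they can be passed to
_arangod_ on the command line (or placed in the `[rocksdb]` section of the
configuration file). The values below are placeholders for illustration only,
not tuning recommendations, and need to be adjusted for your workload and
available memory:

```
arangod --rocksdb.block-cache-size 2147483648 \
        --rocksdb.write-buffer-size 67108864 \
        --rocksdb.max-write-buffer-number 4 \
        --rocksdb.total-write-buffer-size 1073741824 \
        --rocksdb.level0-stop-trigger 36
```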
diff --git a/Documentation/Books/Manual/DataModeling/README.md b/Documentation/Books/Manual/DataModeling/README.md deleted file mode 100644 index 6f8d15389e40..000000000000 --- a/Documentation/Books/Manual/DataModeling/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Data models & modeling -====================== - -This chapter introduces ArangoDB's core concepts and covers - -- its data model (or data models respectively), -- the terminology used throughout the database system and in this - documentation, as well as -- aspects to consider when modeling your data to strike a balance - between natural data structures and great performance - -You will also find usage examples on how to interact with the database system -using [arangosh](../Programs/Arangosh/README.md), e.g. how to create and -drop databases / collections, or how to save, update, replace and remove -documents. You can do all this using the [web interface](../GettingStarted/WebInterface.md) -as well and may therefore skip these sections as beginner. diff --git a/Documentation/Books/Manual/DataModeling/Views/DatabaseMethods.md b/Documentation/Books/Manual/DataModeling/Views/DatabaseMethods.md deleted file mode 100644 index 54bd49ead8ab..000000000000 --- a/Documentation/Books/Manual/DataModeling/Views/DatabaseMethods.md +++ /dev/null @@ -1,131 +0,0 @@ -Database Methods -================ - -View ----- - - - -`db._view(view-name)` - -Returns the view with the given name or null if no such view exists. - - @startDocuBlockInline viewDatabaseGet - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseGet} - ~ db._createView("example", "arangosearch", {}); - | view = db._view("example"); - // or, alternatively - view = db["example"] - ~ db._dropView("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseGet - -`db._view(view-identifier)` - -Returns the view with the given identifier or null if no such view exists. -Accessing views by identifier is discouraged for end users. End users should -access views using the view name. - - -**Examples** - -Get a view by name: - - @startDocuBlockInline viewDatabaseNameKnown - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseNameKnown} - db._view("demoView"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseNameKnown - -Unknown view: - - @startDocuBlockInline viewDatabaseNameUnknown - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseNameUnknown} - db._view("unknown"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseNameUnknown - - -Create ------- - - - -`db._createView(view-name, view-type, view-properties)` - -Creates a new view named *view-name* of type *view-type* with properties -*view-properties*. - -*view-name* is a string and the name of the view. No view or collection with the -same name may already exist in the current database. For more information on -valid view names please refer to the [naming conventions -](../NamingConventions/README.md). - -*view-type* must be the string `"arangosearch"`, as it is currently the only -supported view type. - -*view-properties* is an optional object containing view configuration specific -to each view-type. Currently, only ArangoSearch Views are supported. See -[ArangoSearch View definition -](../../Views/ArangoSearch/DetailedOverview.md#view-definitionmodification) for -details. 
- -**Examples** - - @startDocuBlockInline viewDatabaseCreate - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseCreate} - v = db._createView("example", "arangosearch"); - v.properties() - db._dropView("example") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseCreate - - -All Views ---------- - - - -`db._views()` - -Returns all views of the given database. - - -**Examples** - -List all views: - - @startDocuBlockInline viewDatabaseList - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseList} - ~ db._createView("exampleView", "arangosearch"); - db._views(); - ~ db._dropView("exampleView"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseList - -Drop ----- - - - -`db._dropView(view-name)` - -Drops a view named *view-name* and all its data. No error is thrown if there is -no such view. - -`db._dropView(view-identifier)` - -Drops a view identified by *view-identifier* with all its data. No error is -thrown if there is no such view. - -**Examples** - -Drop a view: - - @startDocuBlockInline viewDatabaseDrop - @EXAMPLE_ARANGOSH_OUTPUT{viewDatabaseDrop} - db._createView("exampleView", "arangosearch"); - db._dropView("exampleView"); - db._view("exampleView"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDatabaseDrop diff --git a/Documentation/Books/Manual/DataModeling/Views/README.md b/Documentation/Books/Manual/DataModeling/Views/README.md deleted file mode 100644 index 61ee93b03368..000000000000 --- a/Documentation/Books/Manual/DataModeling/Views/README.md +++ /dev/null @@ -1,99 +0,0 @@ -JavaScript Interface to Views -============================= - -This is an introduction to ArangoDB's interface for views and how to handle -views from the JavaScript shell _arangosh_. For other languages see the -corresponding language API. - -Address of a View ------------------ - -Like [collections](../Collections/README.md), views are accessed by the user via -their unique name and internally via their identifier. Using the identifier for -accessing views is discouraged. Views share their namespace with collections, -so there cannot exist a view and a collection with the same name in the same -database. - -Usage ------ - -Here follow some basic usage examples. 
More details can be found in the -following chapters: - -- [ArangoSearch Views](../../Views/ArangoSearch/README.md) -- [Database Methods for Views](DatabaseMethods.md) -- [View Methods](ViewMethods.md) - -Create a view with default properties: - - @startDocuBlockInline viewUsage_01 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_01} - ~ db._create("colA"); - ~ db._create("colB"); - view = db._createView("myView", "arangosearch", {}); - ~ addIgnoreCollection("colA"); - ~ addIgnoreCollection("colB"); - ~ addIgnoreView("myView"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_01 - -Get this view again later by name: - - @startDocuBlockInline viewUsage_02 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_02} - view = db._view("myView"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_02 - -Get the view properties: - - @startDocuBlockInline viewUsage_03 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_03} - view.properties(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_03 - -Set a view property: - - @startDocuBlockInline viewUsage_04 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_04} - view.properties({cleanupIntervalStep: 12}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_04 - -Add a link: - - @startDocuBlockInline viewUsage_05 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_05} - view.properties({links: {colA: {includeAllFields: true}}}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_05 - -Add another link: - - @startDocuBlockInline viewUsage_06 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_06} - view.properties({links: {colB: {fields: {text: {}}}}}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_06 - -Remove the first link again: - - @startDocuBlockInline viewUsage_07 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_07} - view.properties({links: {colA: null}}); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_07 - -Drop the view: - - @startDocuBlockInline viewUsage_08 - @EXAMPLE_ARANGOSH_OUTPUT{viewUsage_08} - ~ removeIgnoreCollection("colA"); - ~ removeIgnoreCollection("colB"); - ~ removeIgnoreView("myView"); - db._dropView("myView"); - ~ db._drop("colA"); - ~ db._drop("colB"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewUsage_08 diff --git a/Documentation/Books/Manual/DataModeling/Views/ViewMethods.md b/Documentation/Books/Manual/DataModeling/Views/ViewMethods.md deleted file mode 100644 index 9b2935451a50..000000000000 --- a/Documentation/Books/Manual/DataModeling/Views/ViewMethods.md +++ /dev/null @@ -1,151 +0,0 @@ -View Methods -============ - -Drop ----- - - - -`view.drop()` - -Drops a *view* and all its data. - -**Examples** - -Drop a view: - - @startDocuBlockInline viewDrop - @EXAMPLE_ARANGOSH_OUTPUT{viewDrop} - | v = db._createView("example", "arangosearch"); - // or - v = db._view("example"); - v.drop(); - db._view("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewDrop - -Query Name ----------- - - - -`view.name()` - -Returns the name of the *view*. - -**Examples** - -Get view name: - - @startDocuBlockInline viewName - @EXAMPLE_ARANGOSH_OUTPUT{viewName} - v = db._view("demoView"); - v.name(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewName - -Rename ------- - - - -`view.rename(new-name)` - -Renames a view using the *new-name*. The *new-name* must not already be used by -a different view or collection in the same database. *new-name* must also be a -valid view name. For more information on valid view names please refer to the -[naming conventions](../NamingConventions/README.md). - -If renaming fails for any reason, an error is thrown. 
- -**Note**: this method is not available in a cluster. - -**Examples** - - @startDocuBlockInline viewRename - @EXAMPLE_ARANGOSH_OUTPUT{viewRename} - v = db._createView("example", "arangosearch"); - v.name(); - v.rename("exampleRenamed"); - v.name(); - ~ db._dropView("exampleRenamed"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewRename - -Query Type ----------- - - - -`view.type()` - -Returns the type of the *view*. - -**Examples** - -Get view type: - - @startDocuBlockInline viewType - @EXAMPLE_ARANGOSH_OUTPUT{viewType} - v = db._view("demoView"); - v.type(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewType - -Query Properties ----------------- - - - -`view.properties()` - -Returns the properties of the *view*. The format of the result is specific to -each of the supported [View Types](README.md). - -**Examples** - -Get view properties: - - @startDocuBlockInline viewGetProperties - @EXAMPLE_ARANGOSH_OUTPUT{viewGetProperties} - v = db._view("demoView"); - v.properties(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewGetProperties - -Modify Properties ------------------ - - - -`view.properties(view-property-modification, partialUpdate)` - -Modifies the properties of the *view*. The format of the result is specific to -each of the supported [View Types](README.md). *partialUpdate* is an optional -boolean parameter (`true` by default) that determines how -*view-property-modification* is merged with current view *properties* (adds or -updates *view-property-modification* properties to current if `true` and, -additionally, removes all other properties if `false`). - -Currently, the only supported view type is `arangosearch`, and its properties -can be found in -[](../../Views/ArangoSearch/DetailedOverview.md#view-properties). - -**Examples** - -Modify view properties: - - @startDocuBlockInline viewModifyProperties - @EXAMPLE_ARANGOSH_OUTPUT{viewModifyProperties} - ~ db._createView("example", "arangosearch"); - v = db._view("example"); - | v.properties(); - // set cleanupIntervalStep to 12 - | v.properties({cleanupIntervalStep: 12}); - // add a link - | v.properties({links: {demo: {}}}) - // remove a link - v.properties({links: {demo: null}}) - ~ db._dropView("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock viewModifyProperties diff --git a/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md b/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md deleted file mode 100644 index 4878d700b2fc..000000000000 --- a/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md +++ /dev/null @@ -1,263 +0,0 @@ -Starting Manually -================= - -An ArangoDB _Active Failover_ setup consists of several running _tasks_ or _processes_. - -This section describes how to start an _Active Failover_ by manually starting all -the needed processes. - -Before continuing, be sure to read the [Architecture](../../Architecture/DeploymentModes/ActiveFailover/Architecture.md) -section to get a basic understanding of the underlying architecture and the involved -roles in an ArangoDB Active Failover setup. - -We will include commands for a local test (all processes running on a single machine) -and for a more real production scenario, which makes use of 3 different machines. - -Local Tests ------------ - -In this paragraph we will include commands to manually start an Active Failover -with 3 _Agents_, and two single server instances. - -We will assume that all processes runs on the same machine (127.0.0.1). Such scenario -should be used for testing only. 
- -### Local Test Agency - -To start up an _Agency_ you first have to activate it. This is done by providing -the option `--agency.activate true`. - -To start up the _Agency_ in its fault tolerant mode set the `--agency.size` to `3`. -You will then have to start at least 3 _Agents_ before the _Agency_ will start operation. - -During initialization the _Agents_ have to find each other. To do so provide at -least one common `--agency.endpoint`. The _Agents_ will then coordinate startup -themselves. They will announce themselves with their external address which may be -specified using `--agency.my-address`. This is required in bridged docker setups -or NATed environments. - -So in summary these are the commands to start an _Agency_ of size 3: - -``` -arangod --server.endpoint tcp://0.0.0.0:5001 \ - --agency.my-address=tcp://127.0.0.1:5001 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent1 & - -arangod --server.endpoint tcp://0.0.0.0:5002 \ - --agency.my-address=tcp://127.0.0.1:5002 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent2 & - -arangod --server.endpoint tcp://0.0.0.0:5003 \ - --agency.my-address=tcp://127.0.0.1:5003 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent3 & -``` - -### Single Server Test Instances - -To start the two single server instances, you can use the following commands: - -``` -arangod --server.authentication false \ - --server.endpoint tcp://127.0.0.1:6001 \ - --cluster.my-address tcp://127.0.0.1:6001 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --replication.automatic-failover true \ - --database.directory singleserver6001 & - -arangod --server.authentication false \ - --server.endpoint tcp://127.0.0.1:6002 \ - --cluster.my-address tcp://127.0.0.1:6002 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --replication.automatic-failover true \ - --database.directory singleserver6002 & -``` - -Multiple Machines ------------------ - -The method from the previous paragraph can be extended to a more real production scenario, -to start an Active Failover on multiple machines. The only changes are that one -has to replace all local addresses `127.0.0.1` by the actual IP address of the -corresponding server. Obviously, it would no longer be necessary to use different -port numbers on different servers. - -Let's assume that you want to start you Active Failover with 3 _Agents_ and two -single servers on three different machines with IP addresses: - -``` -192.168.1.1 -192.168.1.2 -192.168.1.3 -``` - -Let's also suppose that each of the above machines runs an _Agent_, an the first -and second machine run also the single instance. - -If we use: - -- _8531_ as port of the _Agents_ -- _8529_ as port of the _Coordinators_ - -then the commands you have to use are reported in the following subparagraphs. 
- -### Agency - -On 192.168.1.1: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.1:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent -``` - -On 192.168.1.2: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.2:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent -``` - -On 192.168.1.3: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.3:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://192.168.1.1:8531 \ - --agency.endpoint tcp://192.168.1.2:8531 \ - --agency.endpoint tcp://192.168.1.3:8531 \ - --agency.supervision true \ - --database.directory agent -``` - -### Single Server Instances - -On 192.168.1.1: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.1:8529 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --replication.automatic-failover true \ - --database.directory singleserver & -``` - -On 192.168.1.2: - -Wait until the previous server is fully started, then start the second single server -instance: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.2:8529 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --replication.automatic-failover true \ - --database.directory singleserver & -``` - -**Note:** in the above commands, you can use host names, if they can be resolved, -instead of IP addresses. - -Manual Start in Docker ----------------------- - -Manually starting an _Active Failover_ via Docker is basically the same as described in the -paragraphs above. - -A bit of extra care has to be invested due to the way in which Docker isolates its network. -By default it fully isolates the network and by doing so an endpoint like `--server.endpoint tcp://0.0.0.0:8529` -will only bind to all interfaces inside the Docker container which does not include -any external interface on the host machine. This may be sufficient if you just want -to access it locally but in case you want to expose it to the outside you must -facilitate Dockers port forwarding using the `-p` command line option. Be sure to -check the [official Docker documentation](https://docs.docker.com/engine/reference/run/). - -You can simply use the `-p` flag in Docker to make the individual processes available on the host -machine or you could use Docker's [links](https://docs.docker.com/engine/reference/run/) -to enable process intercommunication. 
- -An example configuration might look like this: - -``` -docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8529\ - --cluster.my-address tcp://192.168.1.1:10000 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:9001 \ - --cluster.agency-endpoint tcp://192.168.1.2:9001 \ - --cluster.agency-endpoint tcp://192.168.1.3:9001 \ - --replication.automatic-failover true -``` - -This will start a single server within a Docker container with an isolated network. -Within the Docker container it will bind to all interfaces (this will be 127.0.0.1:8529 -and some internal Docker IP on port 8529). By supplying `-p 192.168.1.1:10000:8529` -we are establishing a port forwarding from our local IP (192.168.1.1 port 10000 in -this example) to port 8529 inside the container. Within the command we are telling -_arangod_ how it can be reached from the outside `--cluster.my-address tcp://192.168.1.1:10000`. - -### Authentication - -To start the official Docker container you will have to decide on an authentication -method, otherwise the container will not start. - -Provide one of the arguments to Docker as an environment variable. There are three -options: - -1. ARANGO_NO_AUTH=1 - - Disable authentication completely. Useful for local testing or for operating - in a trusted network (without a public interface). - -2. ARANGO_ROOT_PASSWORD=password - - Start ArangoDB with the given password for root. - -3. ARANGO_RANDOM_ROOT_PASSWORD=1 - - Let ArangoDB generate a random root password. - -For an in depth guide about Docker and ArangoDB please check the official documentation: -https://hub.docker.com/r/arangodb/arangodb/ . Note that we are using the image -`arangodb/arangodb` here which is always the most current one. There is also the -"official" one called `arangodb` whose documentation is here: https://hub.docker.com/_/arangodb/ diff --git a/Documentation/Books/Manual/Deployment/ActiveFailover/README.md b/Documentation/Books/Manual/Deployment/ActiveFailover/README.md deleted file mode 100644 index 6976816b6ac7..000000000000 --- a/Documentation/Books/Manual/Deployment/ActiveFailover/README.md +++ /dev/null @@ -1,13 +0,0 @@ -Active Failover Deployment -========================== - -This _Section_ describes how to deploy an _Active Failover_ environment. - -For a general introduction to _Active Failover_, please refer to the -[Active Failover](../../Architecture/DeploymentModes/ActiveFailover/README.md) chapter. - -There are two main ways to start an _Active Failover_ setup: - -1. using the [_ArangoDB Starter_](UsingTheStarter.md) (possibly in conjunction with -docker), or -1. [manually](ManualStart.md). \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/ActiveFailover/UsingTheStarter.md b/Documentation/Books/Manual/Deployment/ActiveFailover/UsingTheStarter.md deleted file mode 100644 index 6f6c5d299735..000000000000 --- a/Documentation/Books/Manual/Deployment/ActiveFailover/UsingTheStarter.md +++ /dev/null @@ -1,129 +0,0 @@ - -Using the ArangoDB Starter -========================== - -This section describes how to start an Active Failover setup the tool [_Starter_](../../Programs/Starter/README.md) -(the _arangodb_ binary program). - -Local Tests ------------ - -If you want to start a local _Active Failover_ setup quickly, use the `--starter.local` -option of the _Starter_. 
This will start all servers within the context of a single -starter process: - -```bash -arangodb --starter.local --starter.mode=activefailover --starter.data-dir=./localdata -``` - -**Note:** When you restart the _Starter_, it remembers the original `--starter.local` flag. - -Multiple Machines ------------------ - -If you want to start an Active Failover setup using the _Starter_, use the `--starter.mode=activefailover` -option of the _Starter_. A 3 "machine" _Agency_ is started as well as 2 single servers, -that perform asynchronous replication and failover: - -```bash -arangodb --starter.mode=activefailover --server.storage-engine=rocksdb --starter.data-dir=./data --starter.join A,B,C -``` - -Run the above command on machine A, B & C. - -The _Starter_ will decide on which 2 machines to run a single server instance. -To override this decision (only valid while bootstrapping), add a -`--cluster.start-single=false` to the machine where the single server -instance should _not_ be started. - -Once all the processes started by the _Starter_ are up and running, and joined the -Active Failover setup (this may take a while depending on your system), the _Starter_ will inform -you where to connect the Active Failover from a Browser, shell or your program. - -For a full list of options of the _Starter_ please refer to [this](../../Programs/Starter/Options.md) -section. - -Using the ArangoDB Starter in Docker ------------------------------------- - -The _Starter_ can also be used to launch an Active Failover setup based on _Docker_ -containers. To do this, you can use the normal Docker arguments, combined with -`--starter.mode=activefailover`: - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=activefailover \ - --starter.join=A,B,C -``` - -Run the above command on machine A, B & C. - -The _Starter_ will decide on which 2 machines to run a single server instance. -To override this decision (only valid while bootstrapping), add a -`--cluster.start-single=false` to the machine where the single server -instance should _not_ be started. - -If you use an ArangoDB version of 3.4 or above and use the Enterprise -Edition Docker image, you have to set the license key in an environment -variable by adding this option to the above `docker` command: - -``` - -e ARANGO_LICENSE_KEY= -``` - -You can get a free evaluation license key by visiting - - https://www.arangodb.com/download-arangodb-enterprise/ - -Then replace `` above with the actual license key. The start -will then hand on the license key to the Docker containers it launches -for ArangoDB. - -### TLS verified Docker services - -Oftentimes, one needs to harden Docker services using client certificate -and TLS verification. The Docker API allows subsequently only certified access. 
-As the ArangoDB starter starts the ArangoDB cluster instances using this Docker API, -it is mandatory that the ArangoDB starter is deployed with the proper certificates -handed to it, so that the above command is modified as follows: - -```bash -export IP= -export DOCKER_TLS_VERIFY=1 -export DOCKER_CERT_PATH=/path/to/certificate -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/certificate:/path/to/certificate - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=activefailover \ - --starter.join=A,B,C -``` - -Note that the enviroment variables `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` -as well as the additional mountpoint containing the certificate have been added above. -directory. The assignment of `DOCKER_CERT_PATH` is optional, in which case it -is mandatory that the certificates are stored in `$HOME/.docker`. So -the command would then be as follows - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/cert:/root/.docker \ - -e DOCKER_TLS_VERIFY=1 \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=activefailover \ - --starter.join=A,B,C -``` diff --git a/Documentation/Books/Manual/Deployment/ArangoDBStarter/README.md b/Documentation/Books/Manual/Deployment/ArangoDBStarter/README.md deleted file mode 100644 index 516e2802c561..000000000000 --- a/Documentation/Books/Manual/Deployment/ArangoDBStarter/README.md +++ /dev/null @@ -1,18 +0,0 @@ - -Deploying using the ArangoDB Starter -==================================== - -**Single Instance:** - -- [_Starter_ using processes](../SingleInstance/UsingTheStarter.md) -- [_Starter_ using Docker containers](../SingleInstance/UsingTheStarter.md#using-the-arangodb-starter-in-docker) - -**Active Failover:** - -- [_Starter_ using processes](../ActiveFailover/UsingTheStarter.md) -- [_Starter_ using Docker containers](../ActiveFailover/UsingTheStarter.md#using-the-arangodb-starter-in-docker) - -**Cluster:** - -- [_Starter_ using processes](../Cluster/UsingTheStarter.md) -- [_Starter_ using Docker containers](../Cluster/UsingTheStarter.md#using-the-arangodb-starter-in-docker) diff --git a/Documentation/Books/Manual/Deployment/Cloud/AWS.md b/Documentation/Books/Manual/Deployment/Cloud/AWS.md deleted file mode 100644 index 9ddb69a96999..000000000000 --- a/Documentation/Books/Manual/Deployment/Cloud/AWS.md +++ /dev/null @@ -1,21 +0,0 @@ -Deploying ArangoDB on AWS -========================= - -ArangoDB can be deployed on AWS or other cloud platforms. Deploying on a cloud -provider is common choice and many of the most big ArangoDB installation are running -on the cloud. - -Up to ArangoDB 3.2, official ArangoDB AMI were available in the [AWS marketplace](https://aws.amazon.com/marketplace/search/results/ref=dtl_navgno_search_box?page=1&searchTerms=arangodb). -Such AMI are not being maintained anymore, though. However, deploying on AWS is -still possible, and again, a quite common scenario. - -After having initialized your preferred AWS instance, with one of the ArangoDB supported -operating systems, using the [ArangoDB Starter](../ArangoDBStarter/README.md), -performing a [Manual Deployment](../Manually/README.md), -or using [Kubernetes](../Kubernetes/README.md), -are all valid options to deploy on AWS. Please refer to the corresponding chapters for further -information. 
- -**Important:** In order to deploy on AWS, general guidelines, like using a fast, -**direct-attached**, SSD disk for the data directory of the ArangoDB processes -apply. diff --git a/Documentation/Books/Manual/Deployment/Cloud/Azure.md b/Documentation/Books/Manual/Deployment/Cloud/Azure.md deleted file mode 100644 index cb9c8d05c56a..000000000000 --- a/Documentation/Books/Manual/Deployment/Cloud/Azure.md +++ /dev/null @@ -1,20 +0,0 @@ -Deploying ArangoDB on Microsoft Azure -===================================== - -ArangoDB can be deployed on Azure or other cloud platforms. Deploying on a cloud -provider is common choice and many of the most big ArangoDB installation are running -on the cloud. - -No Azure-specific scripts or tools are needed to deploy on Azure. Deploying on Azure -is still possible, and again, a quite common scenario. - -After having initialized your preferred Azure instance, with one of the ArangoDB supported -operating systems, using the [ArangoDB Starter](../ArangoDBStarter/README.md), -performing a [Manual Deployment](../Manually/README.md), -or using [Kubernetes](../Kubernetes/README.md) -are all valid options to deploy on Azure. Please refer to the corresponding chapters for further -information. - -**Important:** In order to deploy on Azure, general guidelines, like using a fast, -**direct-attached**, SSD disk for the data directory of the ArangoDB processes -apply. diff --git a/Documentation/Books/Manual/Deployment/Cloud/README.md b/Documentation/Books/Manual/Deployment/Cloud/README.md deleted file mode 100644 index 607edefd6b9d..000000000000 --- a/Documentation/Books/Manual/Deployment/Cloud/README.md +++ /dev/null @@ -1,5 +0,0 @@ -ArangoDB Deploying Options in the _Cloud_ -========================================= - -- [AWS](AWS.md) -- [Azure](Azure.md) diff --git a/Documentation/Books/Manual/Deployment/Cluster/Kubernetes.md b/Documentation/Books/Manual/Deployment/Cluster/Kubernetes.md deleted file mode 100644 index 9f7a748b5749..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/Kubernetes.md +++ /dev/null @@ -1,4 +0,0 @@ -Cluster Deployments in Kubernetes -================================= - -Please refer to the [Kubernetes](../Kubernetes/README.md) chapter. diff --git a/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md b/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md deleted file mode 100644 index dc91e0474a51..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md +++ /dev/null @@ -1,388 +0,0 @@ -Starting Manually -================= - -An ArangoDB Cluster consists of several running _tasks_ or _processes_ which -form the Cluster. - -This section describes how to start a Cluster by manually starting all the needed -processes. - -Before continuing, be sure to read the [Architecture](../../Architecture/DeploymentModes/Cluster/Architecture.md) -section to get a basic understanding of the underlying architecture and the involved -roles in an ArangoDB Cluster. - -We will include commands for a local test (all processes running on a single machine) -and for a more real production scenario, which makes use of 3 different machines. - -Local Tests ------------ - -In this paragraph we will include commands to manually start a Cluster with 3 _Agents_, -2 _DBservers_ and 2 _Coordinators_. - -We will assume that all processes runs on the same machine (127.0.0.1). Such scenario -should be used for testing only. - -### Local Test Agency - -To start up an _Agency_ you first have to activate it. 
This is done by providing -the option `--agency.activate true`. - -To start up the _Agency_ in its fault tolerant mode set the `--agency.size` to `3`. -You will then have to start at least 3 _Agents_ before the _Agency_ will start operation. - -During initialization the _Agents_ have to find each other. To do so provide at -least one common `--agency.endpoint`. The _Agents_ will then coordinate startup -themselves. They will announce themselves with their external address which may be -specified using `--agency.my-address`. This is required in bridged docker setups -or NATed environments. - -So in summary these are the commands to start an _Agency_ of size 3: - -``` -arangod --server.endpoint tcp://0.0.0.0:5001 \ - --agency.my-address=tcp://127.0.0.1:5001 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent1 & - -arangod --server.endpoint tcp://0.0.0.0:5002 \ - --agency.my-address=tcp://127.0.0.1:5002 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent2 & - -arangod --server.endpoint tcp://0.0.0.0:5003 \ - --agency.my-address=tcp://127.0.0.1:5003 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent3 & -``` - -### Local Test DBServers and Coordinators - -These two roles share a common set of relevant options. First you should specify -the role using `--cluster.my-role`. This can either be `PRIMARY` (a database server) -or `COORDINATOR`. Note that starting from v.3.4 `DBSERVER` is allowed as an alias -for `PRIMARY` as well. Furthermore please provide the external endpoint (IP and port) -of the process via `--cluster.my-address`. - -The following is a full example of what it might look like. 
- -**DBServers:** - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:6001 \ - --cluster.my-address tcp://127.0.0.1:6001 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory dbserver1 & - -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:6002 \ - --cluster.my-address tcp://127.0.0.1:6002 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory dbserver2 & -``` - -**Coordinators:** - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:7001 \ - --cluster.my-address tcp://127.0.0.1:7001 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory coordinator1 & -``` - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:7002 \ - --cluster.my-address tcp://127.0.0.1:7002 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory coordinator2 & -``` - -Note in particular that the endpoint descriptions given under `--cluster.my-address` -and `--cluster.agency-endpoint` must not use the IP address `0.0.0.0` because they -must contain an actual address that can be routed to the corresponding server. The -`0.0.0.0` in `--server.endpoint` simply means that the server binds itself to all -available network devices with all available IP addresses. - -Upon registering with the _Agency_ during startup the Cluster will assign an _ID_ -to every server. The generated _ID_ will be printed out to the log or can be accessed -via the HTTP API by calling `http://server-address/_admin/server/id`. - -You have now launched an ArangoDB Cluster and can contact its _Coordinators_ (and -their corresponding web UI) at the endpoint `tcp://127.0.0.1:7001` and `tcp://127.0.0.1:7002`. - -Multiple Machines ------------------ - -The method from the previous paragraph can be extended to a more real production scenario, -to start an ArangoDB Cluster on multiple machines. The only changes are that one -has to replace all local addresses `127.0.0.1` by the actual IP address of the -corresponding server. Obviously, it would no longer be necessary to use different port numbers -on different servers. - -Let's assume that you want to start your ArangoDB Cluster with 3 _Agents_, 3 _DBServers_ -and 3 _Coordinators_ on three different machines with IP addresses: - -``` -192.168.1.1 -192.168.1.2 -192.168.1.3 -``` - -Let's also suppose that each of the above machines runs an _Agent_, a _DBServer_ -and a _Coordinator_ - -If we use: - -- _8531_ as port of the _Agents_ -- _8530_ as port of the _DBServers_ -- _8529_ as port of the _Coordinators_ - -then the commands you have to use are reported in the following subparagraphs. 
- -### Agency - -On 192.168.1.1: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.1:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent -``` - -On 192.168.1.2: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.2:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent -``` - -On 192.168.1.3: - -``` -arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.3:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://192.168.1.1:8531 \ - --agency.endpoint tcp://192.168.1.2:8531 \ - --agency.endpoint tcp://192.168.1.3:8531 \ - --agency.supervision true \ - --database.directory agent -``` - -### DBServers - -In the commands below, note that `DBSERVER`, as value of the option -`--cluster.my-role`, is allowed only from version 3.4; for previous -versions, to start a _DBServer_, please use `PRIMARY` as role. - -On 192.168.1.1: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.1:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & -``` - -On 192.168.1.2: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.2:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & -``` - -On 192.168.1.3: - -``` -sudo arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.3:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & -``` - -### Coordinators - -On 192.168.1.1: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.1:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & -``` - -On 192.168.1.2: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.2:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & -``` - -On 192.168.1.3: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.3:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & -``` - -**Note:** in the above 
commands, you can use host names, if they can be resolved, -instead of IP addresses. - -**Note 2:** you can easily extend the Cluster, by adding more machines which run -a _DBServer_ and a _Coordiantor_. For instance, if you have an additional forth -machine with IP 192.168.1.4, you can execute the following commands - -On 192.168.1.4: - -``` -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.4.1:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & - -arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.4:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & -``` - -Manual Start in Docker ----------------------- - -Manually starting a Cluster via Docker is basically the same as described in the -paragraphs above. - -A bit of extra care has to be invested due to the way in which Docker isolates its network. -By default it fully isolates the network and by doing so an endpoint like `--server.endpoint tcp://0.0.0.0:8530` -will only bind to all interfaces inside the Docker container which does not include -any external interface on the host machine. This may be sufficient if you just want -to access it locally but in case you want to expose it to the outside you must -facilitate Dockers port forwarding using the `-p` command line option. Be sure to -check the [official Docker documentation](https://docs.docker.com/engine/reference/run/). - -You can simply use the `-p` flag in Docker to make the individual processes available on the host -machine or you could use Docker's [links](https://docs.docker.com/engine/reference/run/) -to enable process intercommunication. - -An example configuration might look like this: - -``` -docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8530 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.1:10000 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:9001 \ - --cluster.agency-endpoint tcp://192.168.1.2:9001 \ - --cluster.agency-endpoint tcp://192.168.1.3:9001 -``` - -This will start a _DBServer_ within a Docker container with an isolated network. -Within the Docker container it will bind to all interfaces (this will be 127.0.0.1:8530 -and some internal Docker IP on port 8530). By supplying `-p 192.168.1.1:10000:8530` -we are establishing a port forwarding from our local IP (192.168.1.1 port 10000 in -this example) to port 8530 inside the container. Within the command we are telling -_arangod_ how it can be reached from the outside `--cluster.my-address tcp://192.168.1.1:10000`. -This information will be forwarded to the _Agency_ so that the other processes in -your Cluster can see how this particular _DBServer_ may be reached. - -### Authentication - -To start the official Docker container you will have to decide on an authentication -method, otherwise the container will not start. - -Provide one of the arguments to Docker as an environment variable. There are three -options: - -1. ARANGO_NO_AUTH=1 - - Disable authentication completely. 
Useful for local testing or for operating - in a trusted network (without a public interface). - -2. ARANGO_ROOT_PASSWORD=password - - Start ArangoDB with the given password for root. - -3. ARANGO_RANDOM_ROOT_PASSWORD=1 - - Let ArangoDB generate a random root password. - -For an in depth guide about Docker and ArangoDB please check the official documentation: -https://hub.docker.com/r/arangodb/arangodb/ . Note that we are using the image -`arangodb/arangodb` here which is always the most current one. There is also the -"official" one called `arangodb` whose documentation is here: https://hub.docker.com/_/arangodb/ diff --git a/Documentation/Books/Manual/Deployment/Cluster/Mesos.md b/Documentation/Books/Manual/Deployment/Cluster/Mesos.md deleted file mode 100644 index 2b1220016391..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/Mesos.md +++ /dev/null @@ -1,4 +0,0 @@ -Cluster Deployments in Mesos, DC/OS -=================================== - -Please refer to the [Mesos, DC/OS](../DCOS/README.md) chapter. \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/Cluster/PreliminaryInformation.md b/Documentation/Books/Manual/Deployment/Cluster/PreliminaryInformation.md deleted file mode 100644 index e5fc69853803..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/PreliminaryInformation.md +++ /dev/null @@ -1,86 +0,0 @@ -Preliminary Information -======================= - -For Debian/Ubuntu Systems -------------------------- - -### Use a different configuration file for the Cluster instance - -The configuration file used for the standalone instance is -`/etc/arangodb3/arangod.conf` (on Linux), and you should use a different one for -the cluster instance(s). If you are using the _Starter_ binary `arangodb`, that is -automatically the case. Otherwise, you might have to copy the configuration -somewhere else and pass it to your `arangod` cluster instance via -`--configuration`. - -### Use a different data directory for the standalone instance - -The data directory is configured in `arangod.conf`: - -``` -[database] -directory = /var/lib/arangodb3 -``` - -You have to make sure that the Cluster instance uses a different data directory -as the standalone instance. If that is not already the case, change the -`database.directory` entry in `arangod.conf` as seen above to a different -directory - -``` -# in arangod.conf: -[database] -directory = /var/lib/arangodb3.standalone -``` - -and create it with the correct permissions: - -``` -$ mkdir -vp /var/lib/arangodb3.standalone -$ chown -c arangodb:arangodb /var/lib/arangodb3.standalone -$ chmod -c 0700 /var/lib/arangodb3.standalone -``` - -### Use a different socket for the standalone instance - -The standalone instance must use a different socket, i.e. it cannot use the -same port on the same network interface than the Cluster. For that, change the -standalone instance's port in `/etc/arangodb3/arangod.conf` - -``` -[server] -endpoint = tcp://127.0.0.1:8529 -``` - -to something unused, e.g. - -``` -[server] -endpoint = tcp://127.1.2.3:45678 -``` -. - -### Use a different _init_ script for the Cluster instance - -This section applies to SystemV-compatible init systems (e.g. sysvinit, OpenRC, -upstart). The steps are different for systemd. - -The package install scripts use the default _init_ script `/etc/init.d/arangodb3` -(on Linux) to stop and start ArangoDB during the installation. If you are using -an _init_ script for your Cluster instance, make sure it is named differently. 
-In addition, the installation might overwrite your _init_ script otherwise. - -If you have previously changed the default _init_ script, move it out of the way - -``` -$ mv -vi /etc/init.d/arangodb3 /etc/init.d/arangodb3.cluster -``` - -and add it to the _autostart_; how this is done depends on your distribution and -_init_ system. On older Debian and Ubuntu systems, you can use `update-rc.d`: - -``` -$ update-rc.d arangodb3.cluster defaults -``` - -Make sure your _init_ script uses a different `PIDFILE` than the default script! diff --git a/Documentation/Books/Manual/Deployment/Cluster/README.md b/Documentation/Books/Manual/Deployment/Cluster/README.md deleted file mode 100644 index 00a66f78fb04..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Cluster Deployment -================== - -This _Chapter_ describes how to deploy an _ArangoDB Cluster_. - -For a general introduction to the _ArangoDB Cluster_, please refer to the -[Cluster](../../Architecture/DeploymentModes/Cluster/README.md) chapter. - -- [Preliminary Information](PreliminaryInformation.md) -- [Using the ArangoDB Starter](UsingTheStarter.md) -- [Manual Start](ManualStart.md) -- [Kubernetes](Kubernetes.md) -- [Mesos, DC/OS](Mesos.md) - -Go through the detailed -[ArangoDB Cluster Administration Course](https://www.arangodb.com/arangodb-cluster-course/) -to dig deeper into maintenance, resilience and troubleshooting of your -distributed environment. diff --git a/Documentation/Books/Manual/Deployment/Cluster/UsingTheStarter.md b/Documentation/Books/Manual/Deployment/Cluster/UsingTheStarter.md deleted file mode 100644 index 41ae77c216a2..000000000000 --- a/Documentation/Books/Manual/Deployment/Cluster/UsingTheStarter.md +++ /dev/null @@ -1,128 +0,0 @@ - -Using the ArangoDB Starter -========================== - -This section describes how to start a Cluster using the tool [_Starter_](../../Programs/Starter/README.md) -(the _arangodb_ binary program). - -Local Tests ------------ - -If you only want a local test Cluster, you can run a single _Starter_ with the -`--starter.local` argument. It will start a 3 "machine" Cluster on your local PC: - -``` -arangodb --starter.local --starter.data-dir=./localdata -``` - -**Note:** a local Cluster is intended only for test purposes since a failure of -a single PC will bring down the entire Cluster. - -Multiple Machines ------------------ - -If you want to start a Cluster using the _Starter_, you can use the following command: - -``` -arangodb --server.storage-engine=rocksdb --starter.data-dir=./data --starter.join A,B,C -``` - -Run the above command on machine A, B & C. - -Once all the processes started by the _Starter_ are up and running, and joined the -Cluster (this may take a while depending on your system), the _Starter_ will inform -you where to connect the Cluster from a Browser, shell or your program. - -For a full list of options of the _Starter_ please refer to [this](../../Programs/Starter/Options.md) -section. - -Using the ArangoDB Starter in Docker ------------------------------------- - -The _Starter_ can also be used to launch Clusters based on _Docker_ containers: - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.join=A,B,C -``` - -Run the above command on machine A, B & C. 
- -If you use an ArangoDB version of 3.4 or above and use the Enterprise -Edition Docker image, you have to set the license key in an environment -variable by adding this option to the above `docker` command: - -``` - -e ARANGO_LICENSE_KEY= -``` - -You can get a free evaluation license key by visiting - - https://www.arangodb.com/download-arangodb-enterprise/ - -Then replace `` above with the actual license key. The start -will then hand on the license key to the Docker containers it launches -for ArangoDB. - -**TLS verified Docker services** - -Oftentimes, one needs to harden Docker services using client certificate -and TLS verification. The Docker API allows subsequently only certified access. -As the ArangoDB starter starts the ArangoDB cluster instances using this Docker API, -it is mandatory that the ArangoDB starter is deployed with the proper certificates -handed to it, so that the above command is modified as follows: - -```bash -export IP= -export DOCKER_CERT_PATH=/path/to/certificate -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v $DOCKER_CERT_PATH:$DOCKER_CERT_PATH \ - -e DOCKER_TLS_VERIFY=1 \ - -e DOCKER_CERT_PATH=$DOCKER_CERT_PATH \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.join=A,B,C -``` - -Note that the enviroment variables `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` -as well as the additional mountpoint containing the certificate have been added above. -directory. The assignment of `DOCKER_CERT_PATH` is optional, in which case it -is mandatory that the certificates are stored in `$HOME/.docker`. So -the command would then be as follows - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/cert:/root/.docker \ - -e DOCKER_TLS_VERIFY=1 \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.join=A,B,C -``` - -Under the Hood --------------- -The first `arangodb` you ran will become the _master_ of your _Starter_ -setup, the other `arangodb` instances will become the _slaves_ of your _Starter_ -setup. Please do not confuse the terms _master_ and _slave_ above with the master/slave -technology of ArangoDB. The terms above refers to the _Starter_ setup. - -The _Starter_ _master_ determines which ArangoDB server processes to launch on which -_Starter_ _slave_, and how they should communicate. - -It will then launch the server processes and monitor them. Once it has detected -that the setup is complete you will get the prompt. - -The _Starter_ _master_ will save the setup for subsequent starts. diff --git a/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncMaster.md b/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncMaster.md deleted file mode 100644 index ed26aaab3421..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncMaster.md +++ /dev/null @@ -1,91 +0,0 @@ - -# ArangoSync Master - -The _ArangoSync Master_ is responsible for managing all synchronization, creating -tasks and assigning those to the _ArangoSync Workers_. - -At least 2 instances must be deployed in each datacenter. -One instance will be the "leader", the other will be an inactive slave. When the -leader is gone for a short while, one of the other instances will take over. - -With clusters of a significant size, the _sync master_ will require a -significant set of resources. 
Therefore it is recommended to deploy the _sync masters_ -on their own servers, equiped with sufficient CPU power and memory capacity. - -To start an _ArangoSync Master_ using a `systemd` service, use a unit like this: - -```text -[Unit] -Description=Run ArangoSync in master mode -After=network.target - -[Service] -Restart=on-failure -EnvironmentFile=/etc/arangodb.env -EnvironmentFile=/etc/arangodb.env.local -LimitNOFILE=8192 -ExecStart=/usr/sbin/arangosync run master \ - --log.level=debug \ - --cluster.endpoint=${CLUSTERENDPOINTS} \ - --cluster.jwtSecret=${CLUSTERSECRET} \ - --server.keyfile=${CERTIFICATEDIR}/tls.keyfile \ - --server.client-cafile=${CERTIFICATEDIR}/client-auth-ca.crt \ - --server.endpoint=https://${PRIVATEIP}:${MASTERPORT} \ - --server.port=${MASTERPORT} \ - --master.endpoint=${PUBLICMASTERENDPOINTS} \ - --master.jwtSecret=${MASTERSECRET} \ - --mq.type=direct -TimeoutStopSec=60 - -[Install] -WantedBy=multi-user.target -``` - -When using the `kafka` type message queue, replace `--mq.type=direct` with `--mq.type=kafka` -and add the following arguments. - -```text - --mq.kafka-addr=${KAFKAENDPOINTS} \ - --mq.kafka-client-keyfile=${CERTIFICATEDIR}/kafka-client.key \ - --mq.kafka-cacert=${CERTIFICATEDIR}/tls-ca.crt -``` - -The _sync master_ needs a TLS server certificate and a -If you want the service to create a TLS certificate & client authentication -certificate, for authenticating with _ArangoSync Masters_ in another datacenter, -for every start, add this to the `Service` section. - -```text -ExecStartPre=/usr/bin/sh -c "mkdir -p ${CERTIFICATEDIR}" -ExecStartPre=/usr/sbin/arangosync create tls keyfile \ - --cacert=${CERTIFICATEDIR}/tls-ca.crt \ - --cakey=${CERTIFICATEDIR}/tls-ca.key \ - --keyfile=${CERTIFICATEDIR}/tls.keyfile \ - --host=${PUBLICIP} \ - --host=${PRIVATEIP} \ - --host=${HOST} \ - --host=${CLUSTERDNSNAME} -ExecStartPre=/usr/sbin/arangosync create client-auth keyfile \ - --cacert=${CERTIFICATEDIR}/tls-ca.crt \ - --cakey=${CERTIFICATEDIR}/tls-ca.key \ - --keyfile=${CERTIFICATEDIR}/kafka-client.key \ - --host=${PUBLICIP} \ - --host=${PRIVATEIP} \ - --host=${HOST} -``` - -The _ArangoSync Master_ must be reachable on a TCP port `${MASTERPORT}` (used with `--server.port` option). -This port must be reachable from inside the datacenter (by sync workers and operations) -and from inside of the other datacenter (by sync masters in the other datacenter). - -Note that other sync masters in the same datacenter will contact this sync master -through the endpoint specified in `--server.endpoint`. -Sync masters (&sync workers) from the other datacenter will contains this sync master -through the endpoint specified in `--master.endpoint`. - -## Recommended deployment environment - -Since the _sync masters_ can be CPU intensive when running lots of databases & collections, -it is recommended to run them on dedicated machines with a lot of CPU power. - -Consider these machines "pets". diff --git a/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncWorkers.md b/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncWorkers.md deleted file mode 100644 index bcb7e70f0140..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/ArangoSyncWorkers.md +++ /dev/null @@ -1,53 +0,0 @@ - -# ArangoSync Workers - -The _ArangoSync Worker_ is responsible for executing synchronization tasks. - -For optimal performance at least 1 _worker_ instance must be placed on -every machine that has an ArangoDB _DBserver_ running. 
This ensures that tasks -can be executed with minimal network traffic outside of the machine. - -Since _sync workers_ will automatically stop once their TLS server certificate expires -(which is set to 2 years by default), it is recommended to run at least 2 instances -of a _worker_ on every machine in the datacenter. That way, tasks can still be -assigned in the most optimal way, even when a _worker_ is temporarily down for a -restart. - -To start an _ArangoSync Worker_ using a `systemd` service, use a unit like this: - -```text -[Unit] -Description=Run ArangoSync in worker mode -After=network.target - -[Service] -Restart=on-failure -EnvironmentFile=/etc/arangodb.env -EnvironmentFile=/etc/arangodb.env.local -Environment=PORT=8729 -LimitNOFILE=1000000 -ExecStart=/usr/sbin/arangosync run worker \ - --log.level=debug \ - --server.port=${PORT} \ - --server.endpoint=https://${PRIVATEIP}:${PORT} \ - --master.endpoint=${MASTERENDPOINTS} \ - --master.jwtSecret=${MASTERSECRET} -TimeoutStopSec=60 - -[Install] -WantedBy=multi-user.target -``` - -The _ArangoSync Worker_ must be reachable on a TCP port `${PORT}` (used with `--server.port` -option). This port must be reachable from inside the datacenter (by _sync masters_). - -Note the large file descriptor limit. The _sync worker_ requires about 30 file descriptors per -shard. If you use hardware with huge resources, and still run out of file descriptors, -you can decide to run multiple _sync workers_ on each machine in order to spread the tasks across them. - -## Recommended deployment environment - -The _sync workers_ should be run on all machines that also contain an ArangoDB _DBServer_. -The _sync worker_ can be memory intensive when running lots of databases & collections. - -Consider these machines "cattle". diff --git a/Documentation/Books/Manual/Deployment/DC2DC/Cluster.md b/Documentation/Books/Manual/Deployment/DC2DC/Cluster.md deleted file mode 100644 index e59928975d85..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/Cluster.md +++ /dev/null @@ -1,80 +0,0 @@ - -# ArangoDB cluster - -There are several ways to start an ArangoDB cluster. In this section we will focus -on our recommended way to start ArangoDB: the ArangoDB _Starter_. - -_Datacenter to datacenter replication_ requires the `rocksdb` storage engine. The -example setup described in this section will have `rocksdb` enabled. If you choose -to deploy with a different strategy keep in mind to set the storage engine. - -For the other possibilities to deploy an ArangoDB cluster please refer to -[this](../Cluster/README.md) section. - -The _Starter_ simplifies things for the operator and will coordinate a distributed -cluster startup across several machines and assign cluster roles automatically. - -When started on several machines and enough machines have joined, the _Starters_ -will start _Agents_, _Coordinators_ and _DBservers_ on these machines. - -When running the _Starter_ will supervise its child tasks (namely _Coordinators_, -_DBservers_ and _Agents_) and restart them in case of failure. 
- -To start the cluster using a `systemd` unit file use the following: - -```text -[Unit] -Description=Run the ArangoDB Starter -After=network.target - -[Service] -Restart=on-failure -EnvironmentFile=/etc/arangodb.env -EnvironmentFile=/etc/arangodb.env.local -Environment=DATADIR=/var/lib/arangodb/cluster -ExecStartPre=/usr/bin/sh -c "mkdir -p ${DATADIR}" -ExecStart=/usr/bin/arangodb \ - --starter.address=${PRIVATEIP} \ - --starter.data-dir=${DATADIR} \ - --starter.join=${STARTERENDPOINTS} \ - --server.storage-engine=rocksdb \ - --auth.jwt-secret=${CLUSTERSECRETPATH} -TimeoutStopSec=60 - -[Install] -WantedBy=multi-user.target -``` - -Note that we set `rocksdb` in the unit service file. - -## Cluster authentication - -The communication between the cluster nodes use a token (JWT) to authenticate. -This must be shared between cluster nodes. - -Sharing secrets is obviously a very delicate topic. The above workflow assumes -that the operator will put a secret in a file named `${CLUSTERSECRETPATH}`. - -We recommend to use a dedicated system for managing secrets like HashiCorps' `Vault` or the -secret management of `DC/OS`. - -## Required ports - -As soon as enough machines have joined, the _Starter_ will begin starting _Agents_, -_Coordinators_ and _DBservers_. - -Each of these tasks needs a port to communicate. Please make sure that the following -ports are available on all machines: - -- `8529` for Coordinators -- `8530` for DBservers -- `8531` for Agents - -The _Starter_ itself will use port `8528`. - -## Recommended deployment environment - -Since the _Agents_ are so critical to the availability of both the ArangoDB and the ArangoSync cluster, -it is recommended to run _Agents_ on dedicated machines. Consider these machines "pets". - -_Coordinators_ and _DBServers_ can be deployed on other machines that should be considered "cattle". diff --git a/Documentation/Books/Manual/Deployment/DC2DC/KafkaZookeeper.md b/Documentation/Books/Manual/Deployment/DC2DC/KafkaZookeeper.md deleted file mode 100644 index 35e28415f980..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/KafkaZookeeper.md +++ /dev/null @@ -1,17 +0,0 @@ - -# Kafka & Zookeeper - -{% hint 'tip' %} -We recommend to use DirectMQ instead of Kafka as message queue, -because it is simpler to use and tailored to the needs of ArangoDB. -It also removes the need for Zookeeper. - -DirectMQ is available since ArangoSync v0.5.0 -(ArangoDB Enterprise Edition v3.3.8). -{% endhint %} - -## Recommended deployment environment - -Since the Kafka brokers are really CPU and memory intensive, -it is recommended to run Zookeeper & Kafka on dedicated machines. -Consider these machines "pets". diff --git a/Documentation/Books/Manual/Deployment/DC2DC/PrometheusGrafana.md b/Documentation/Books/Manual/Deployment/DC2DC/PrometheusGrafana.md deleted file mode 100644 index 8a25f4500962..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/PrometheusGrafana.md +++ /dev/null @@ -1,95 +0,0 @@ - -# Prometheus & Grafana (optional) - -_ArangoSync_ provides metrics in a format supported by [Prometheus](https://prometheus.io). -We also provide a standard set of dashboards for viewing those metrics in [Grafana](https://grafana.org). - -If you want to use these tools, please refer to their websites for instructions -on how to deploy them. - -After deployment, you must configure _Prometheus_ using a configuration file that -instructs it about which targets to scrape. 
For _ArangoSync_ you should configure -scrape targets for all _sync masters_ and all _sync workers_. To do so, you can -use a configuration such as this: - -```text -global: - scrape_interval: 10s # scrape targets every 10 seconds. - -scrape_configs: - # Scrap sync masters - - job_name: 'sync_master' - scheme: 'https' - bearer_token: "${MONITORINGTOKEN}" - tls_config: - insecure_skip_verify: true - static_configs: - - targets: - - "${IPMASTERA1}:8629" - - "${IPMASTERA2}:8629" - - "${IPMASTERB1}:8629" - - "${IPMASTERB2}:8629" - labels: - type: "master" - relabel_configs: - - source_labels: [__address__] - regex: ${IPMASTERA1}\:8629|${IPMASTERA2}\:8629 - target_label: dc - replacement: A - - source_labels: [__address__] - regex: ${IPMASTERB1}\:8629|${IPMASTERB2}\:8629 - target_label: dc - replacement: B - - source_labels: [__address__] - regex: ${IPMASTERA1}\:8629|${IPMASTERB1}\:8629 - target_label: instance - replacement: 1 - - source_labels: [__address__] - regex: ${IPMASTERA2}\:8629|${IPMASTERB2}\:8629 - target_label: instance - replacement: 2 - - # Scrap sync workers - - job_name: 'sync_worker' - scheme: 'https' - bearer_token: "${MONITORINGTOKEN}" - tls_config: - insecure_skip_verify: true - static_configs: - - targets: - - "${IPWORKERA1}:8729" - - "${IPWORKERA2}:8729" - - "${IPWORKERB1}:8729" - - "${IPWORKERB2}:8729" - labels: - type: "worker" - relabel_configs: - - source_labels: [__address__] - regex: ${IPWORKERA1}\:8729|${IPWORKERA2}\:8729 - target_label: dc - replacement: A - - source_labels: [__address__] - regex: ${IPWORKERB1}\:8729|${IPWORKERB2}\:8729 - target_label: dc - replacement: B - - source_labels: [__address__] - regex: ${IPWORKERA1}\:8729|${IPWORKERB1}\:8729 - target_label: instance - replacement: 1 - - source_labels: [__address__] - regex: ${IPWORKERA2}\:8729|${IPWORKERB2}\:8729 - target_label: instance - replacement: 2 -``` - -Note: The above example assumes 2 datacenters, with 2 _sync masters_ & 2 _sync workers_ -per datacenter. You have to replace all `${...}` variables in the above configuration -with applicable values from your environment. - -## Recommended deployment environment - -_Prometheus_ can be a memory & CPU intensive process. It is recommended to keep them -on other machines than used to run the ArangoDB cluster or _ArangoSync_ components. - -Consider these machines "cattle", unless you configure alerting on _prometheus_, -in which case it is recommended to consider these machines "pets". diff --git a/Documentation/Books/Manual/Deployment/DC2DC/README.md b/Documentation/Books/Manual/Deployment/DC2DC/README.md deleted file mode 100644 index 36d8847b8955..000000000000 --- a/Documentation/Books/Manual/Deployment/DC2DC/README.md +++ /dev/null @@ -1,26 +0,0 @@ - -# Datacenter to datacenter replication deployment - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -This chapter describes how to deploy all the components needed for _datacenter to -datacenter replication_. - -For a general introduction to _datacenter to datacenter replication_, please refer -to the [Datacenter to datacenter replication](../../Architecture/DeploymentModes/DC2DC/README.md) chapter. - -[Requirements](../../Architecture/DeploymentModes/DC2DC/Requirements.md) can be found in this section. 
- -Deployment steps: - -- [Cluster](Cluster.md) -- [ArangoSync Master](ArangoSyncMaster.md) -- [ArangoSync Workers](ArangoSyncWorkers.md) -- [Prometheus & Grafana (optional)](PrometheusGrafana.md) - -When using the `kafka` type message queue, you also have to deploy: - -- [Kafka & Zookeeper](KafkaZookeeper.md) diff --git a/Documentation/Books/Manual/Deployment/DCOS/ArangoMesosContainerizer.png b/Documentation/Books/Manual/Deployment/DCOS/ArangoMesosContainerizer.png deleted file mode 100644 index fe67a89c4135..000000000000 Binary files a/Documentation/Books/Manual/Deployment/DCOS/ArangoMesosContainerizer.png and /dev/null differ diff --git a/Documentation/Books/Manual/Deployment/DCOS/ClusterMesos.md b/Documentation/Books/Manual/Deployment/DCOS/ClusterMesos.md deleted file mode 100644 index 388e84365a38..000000000000 --- a/Documentation/Books/Manual/Deployment/DCOS/ClusterMesos.md +++ /dev/null @@ -1,223 +0,0 @@ -Cluster Deployments in Mesos, DC/OS -=================================== - -ArangoDB has a sophisticated and yet easy way to use cluster mode. To leverage the -full cluster feature set (monitoring, scaling, automatic failover and automatic -replacement of failed nodes) you might want to run ArangoDB on some kind of cluster -management system, e.g. Apache Mesos. Mesos is a cluster operating system which -powers some of the worlds biggest datacenters running several thousands of nodes. - -DC/OS ------ - -DC/OS eases much of the process to install a Mesos cluster. You can deploy it very -quickly on a variety of cloud hosters or setup your own DC/OS locally. DC/OS is -a set of tools built on top of Apache Mesos. Apache Mesos is a so called "Distributed -Cluster Operation System" and the core of DC/OS. Apache Mesos has the concept of -so called [persistent volumes](http://mesos.apache.org/documentation/latest/persistent-volume/) -which make it perfectly suitable for a database. - -### Installing - -First prepare a DC/OS cluster by going to https://dcos.io and following -the instructions there. - -DC/OS comes with its own package management. Packages can be installed from the -so called "Universe". As an official DC/OS partner, ArangoDB can be installed from -there straight away. - -1. Installing via DC/OS UI - - 1. Open your browser and go to the DC/OS admin interface - 2. Open the "Universe" tab - 3. Locate arangodb and hit "Install Package" - 4. Press "Install Package" - -2. Installing via the DC/OS command line - - 1. Install the [dcos cli](https://docs.mesosphere.com/usage/cli/) - 2. Open a terminal and issue `dcos install arangodb` - -Both options are essentially doing the same in the background. Both are starting -ArangoDB with its default options set. - -To review the default options using the web interface simply click "Advanced Installation" -in the web interface. There you will find a list of options including some explanation. - -To review the default options using the CLI first type `dcos package describe --config arangodb`. -This will give you a flat list of default settings. - -To get an explanation of the various command line options please check the latest -options here (choose the most recent number and have a look at `config.json`): - -https://github.com/mesosphere/universe/tree/version-3.x/repo/packages/A/arangodb - -After installation DC/OS will start deploying the ArangoDB cluster on the DC/OS -cluster. You can watch ArangoDB starting on the "Services" tab in the web interface. 
-Once it is listed as healthy click the link next to it and you should see the ArangoDB -web interface. - -### ArangoDB Mesos framework - -As soon as ArangoDB was deployed Mesos will keep your cluster running. The web interface -has many monitoring facilities so be sure to make yourself familiar with the DC/OS -web interface. As a fault tolerant system Mesos will take care of most failure -scenarios automatically. Mesos does that by running ArangoDB as a so called "framework". -This framework has been specifically built to keep ArangoDB running in a healthy -condition on the Mesos cluster. From time to time a task might fail. The ArangoDB -framework will then take care of rescheduling the failed task. As it knows about -the very specifics of each cluster task and its role it will automatically take -care of most failure scenarios. - -To inspect what the framework is doing go to `http://web-interface-url/mesos` in -your browser. Locate the task "arangodb" and inspect stderr in the "Sandbox". This -can be of interest for example when a slave got lost and the framework is rescheduling -the task. - -### Using ArangoDB - -To use ArangoDB as a datastore in your DC/OS cluster you can facilitate the service -discovery of DC/OS. Assuming you deployed a standard ArangoDB cluster the -[mesos dns](https://github.com/mesosphere/mesos-dns) will know about `arangodb.mesos`. -By doing a SRV DNS request (check the documentation of mesos dns) you can find out -the port where the internal HAProxy of ArangoDB is running. This will offer a round -robin load balancer to access all ArangoDB coordinators. - -### Scaling ArangoDB - -To change the settings of your ArangoDB Cluster access the ArangoDB UI and hit "Nodes". -On the scale tab you will have the ability to scale your cluster up and down. - -After changing the settings the ArangoDB framework will take care of the rest. -Scaling your cluster up is generally a straightforward operation as Mesos will simply -launch another task and be done with it. Scaling down is a bit more complicated as -the data first has to be moved to some other place so that will naturally take somewhat -longer. - -Please note that scaling operations might not always work. For example if the underlying -Mesos cluster is completely saturated with its running tasks scaling up will not -be possible. Scaling down might also fail due to the cluster not being able to move -all shards of a DBServer to a new destination because of size limitations. Be sure -to check the output of the ArangoDB framework. - -### Deinstallation - -Deinstalling ArangoDB is a bit more difficult as there is much state being kept in -the Mesos cluster which is not automatically cleaned up. To deinstall from the command -line use the following one liner: - -`dcos arangodb uninstall ; dcos package uninstall arangodb` - -This will first cleanup the state in the cluster and then uninstall arangodb. - -### arangodb-cleanup-framework - -Should you forget to cleanup the state you can do so later by using -the [arangodb-cleanup-framework](https://github.com/arangodb/arangodb-cleanup-framework/) -container. Otherwise you might not be able to deploy a new arangodb installation. - -The cleanup framework will announce itself as a normal ArangoDB. Mesos will recognize -this and offer all persistent volumes it still has for ArangoDB to this framework. -The cleanup framework will then properly free the persistent volumes. Finally it -will clean up any state left in zookeeper (the central configuration manager in -a Mesos cluster). 
- -To deploy the cleanup framework, follow the instructions in the github repository. -After deployment watch the output in the sandbox of the Mesos web interface. After -a while there shouldn't be any persistent resource offers anymore as everything -was cleaned up. After that you can delete the cleanup framework again via Marathon. - -Apache Mesos and Marathon -------------------------- - -You can also install ArangoDB on a bare Apache Mesos cluster provided that Marathon -is running on it. - -Doing so has the following downsides: - -1. Manual Mesos cluster setup -1. You need to implement your own service discovery -1. You are missing the dcos cli -1. Installation and deinstallation are tedious -1. You need to setup some kind of proxy tunnel to access ArangoDB from the outside -1. Sparse monitoring capabilities - -However these are things which do not influence ArangoDB itself and operating your -cluster like this is fully supported. - -### Installing via Marathon - -To install ArangoDB via marathon you need a proper config file: - -``` -{ - "id": "arangodb", - "cpus": 0.25, - "mem": 256.0, - "ports": [0, 0, 0], - "instances": 1, - "args": [ - "framework", - "--framework_name=arangodb", - "--master=zk://172.17.0.2:2181/mesos", - "--zk=zk://172.17.0.2:2181/arangodb", - "--user=", - "--principal=pri", - "--role=arangodb", - "--mode=cluster", - "--async_replication=true", - "--minimal_resources_agent=mem(*):512;cpus(*):0.25;disk(*):512", - "--minimal_resources_dbserver=mem(*):512;cpus(*):0.25;disk(*):1024", - "--minimal_resources_secondary=mem(*):512;cpus(*):0.25;disk(*):1024", - "--minimal_resources_coordinator=mem(*):512;cpus(*):0.25;disk(*):1024", - "--nr_agents=1", - "--nr_dbservers=2", - "--nr_coordinators=2", - "--failover_timeout=86400", - "--arangodb_image=arangodb/arangodb-mesos:3.1", - "--secondaries_with_dbservers=false", - "--coordinators_with_dbservers=false" - ], - "container": { - "type": "DOCKER", - "docker": { - "image": "arangodb/arangodb-mesos-framework:3.1", - "network": "HOST" - } - }, - "healthChecks": [ - { - "protocol": "HTTP", - "path": "/framework/v1/health.json", - "gracePeriodSeconds": 3, - "intervalSeconds": 10, - "portIndex": 0, - "timeoutSeconds": 10, - "maxConsecutiveFailures": 0 - } - ] -} -``` - -Carefully review the settings (especially the IPs and the resources). Then you can -deploy to Marathon: - -``` -curl -X POST -H "Content-Type: application/json" http://url-of-marathon/v2/apps -d @arangodb3.json -``` - -Alternatively use the web interface of Marathon to deploy ArangoDB. It has a JSON -mode and you can use the above configuration file. - -### Deinstallation via Marathon - -As with DC/OS you first need to properly cleanup any state leftovers. - -The easiest is to simply delete ArangoDB and then deploy the cleanup-framework -(see section arangodb-cleanup-framework). 
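Deleting the app itself can be done through the Marathon REST API, mirroring the deployment call shown above. The URL and app id below are examples and assume the `arangodb` id used in the example configuration file:

```bash
# Remove the ArangoDB app from Marathon (app id "arangodb" as in the
# example configuration above); the leftover cluster state still has to
# be cleaned up with the cleanup framework afterwards
curl -X DELETE http://url-of-marathon/v2/apps/arangodb
```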
- -Configuration options ---------------------- - -The Arangodb Mesos framework has a ton of different options which are listed and -described here: https://github.com/arangodb/arangodb-mesos-framework/tree/3.2 diff --git a/Documentation/Books/Manual/Deployment/DCOS/MesosContainers.md b/Documentation/Books/Manual/Deployment/DCOS/MesosContainers.md deleted file mode 100644 index 048eba0c5c81..000000000000 --- a/Documentation/Books/Manual/Deployment/DCOS/MesosContainers.md +++ /dev/null @@ -1,57 +0,0 @@ -Running ArangoDB Clusters on DC/OS with Mesos Containers -======================================================== - -Since DC/OS 1.8 a new way of running containers in Mesos clouds has become available. It re-uses the docker on-disk format and distribution infrastructure, -but pairs it with management features that make it a better fit for DC/OS environments. - -With ArangoDB 3.2.6 we introduce the possibility to instanciate an ArangoDB Clusters using the Mesos containerizer. You can deploy clusters with it -by unchecking the `USEDOCKER` checkmark: - -![Using the Mesos container engine](ArangoMesosContainerizer.png) - -Once the ArangoDB framework task is up and running you can revalidate its running using the Mesos container engine by clicking on the task, -and scroll all the way down in the *Details* tab: -![Inspecting running task](RunningInMesosContainerizer.png) - -Using the DC/OS cli we can now also list the running tasks: - -``` -# dcos task -NAME HOST USER STATE ID MESOS ID -arangodb3 10.0.1.221 root R arangodb3.988230ce-b95f-11e7-b0b3-d27390e16c96 4339f842-fb3b-46a6-9cb1-46febca9ad31-S4 -arangodb3-Agent1 10.0.3.125 root R f1bbb380-6650-47c6-a6dd-31256b9db2a7 4339f842-fb3b-46a6-9cb1-46febca9ad31-S1 -arangodb3-Agent2 10.0.0.234 root R 410e4df2-5dea-4fae-9724-82e382488acd 4339f842-fb3b-46a6-9cb1-46febca9ad31-S0 -arangodb3-Agent3 10.0.0.231 root R bbb73025-00da-4bdf-8a6d-e34129e3abaf 4339f842-fb3b-46a6-9cb1-46febca9ad31-S5 -arangodb3-Coordinator1 10.0.3.125 root R 9eea93a7-2ada-45c2-8bb6-f3f6153b7fd8 4339f842-fb3b-46a6-9cb1-46febca9ad31-S1 -arangodb3-Coordinator2 10.0.0.234 root R c49496c2-ea66-4b75-9b0d-4d35e637ca77 4339f842-fb3b-46a6-9cb1-46febca9ad31-S0 -arangodb3-DBServer1 10.0.0.234 root R 43bdda44-4edb-457a-bde7-44d5711f076d 4339f842-fb3b-46a6-9cb1-46febca9ad31-S0 -arangodb3-DBServer2 10.0.3.125 root R ff3ad9fb-d69a-4d1a-9bd7-43e782835d83 4339f842-fb3b-46a6-9cb1-46febca9ad31-S1 -``` - -And find the running ArangoDB cluster. We can now use the DC/OS cli to gain a shell on the framework container by picking its ID from the 5th column: - -``` -dcos task exec -it arangodb3.988230ce-b95f-11e7-b0b3-d27390e16c96 bash -``` - -Which will give us an interactive shell in that container. Since the container is stripped down to the bare minimum, we may want to install a bunch of tools for better inspecting the current state: - -``` -root@ip-10-0-1-221:/mnt/mesos/sandbox# export PATH=$PATH:/usr/sbin:/sbin; \ - apt-get update; \ - apt-get install curl net-tools procps netcat jq -``` - -We then can i.e. inspect the running tasks: - -``` -root@ip-10-0-1-221:/mnt/mesos/sandbox# ps -eaf -UID PID PPID C STIME TTY TIME CMD -root 1 0 0 08:36 ? 00:00:00 /opt/mesosphere/active/mesos/libexec/mesos/mesos-containerizer launch -root 6 1 0 08:36 ? 00:00:00 mesos-executor --launcher_dir=/opt/mesosphere/active/mesos/libexec/mesos --sandbox_directory=/mnt/mesos/sandbo -root 16 6 0 08:36 ? 
00:00:01 ./arangodb-framework --webui_port=10452 --framework_port=10453 --webui=http://10.0.1.221:10452 framework --fra -root 38 16 0 08:37 ? 00:00:00 haproxy -f /tmp/arango-haproxy.conf -sf 37 -root 40 1 0 08:42 ? 00:00:00 /opt/mesosphere/active/mesos/libexec/mesos/mesos-containerizer launch -root 41 40 0 08:42 ? 00:00:00 bash -root 460 41 0 08:44 ? 00:00:00 ps -eaf -``` diff --git a/Documentation/Books/Manual/Deployment/DCOS/README.md b/Documentation/Books/Manual/Deployment/DCOS/README.md deleted file mode 100644 index d78ffc0a0bf3..000000000000 --- a/Documentation/Books/Manual/Deployment/DCOS/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Deploying ArangoDB in Mesos, DC/OS -================================== - -- [Cluster Deployments in Mesos, DC/OS](ClusterMesos.md) -- [Choosing the container engine](MesosContainers.md) diff --git a/Documentation/Books/Manual/Deployment/DCOS/RunningInMesosContainerizer.png b/Documentation/Books/Manual/Deployment/DCOS/RunningInMesosContainerizer.png deleted file mode 100644 index c3bf9416f21a..000000000000 Binary files a/Documentation/Books/Manual/Deployment/DCOS/RunningInMesosContainerizer.png and /dev/null differ diff --git a/Documentation/Books/Manual/Deployment/Docker/README.md b/Documentation/Books/Manual/Deployment/Docker/README.md deleted file mode 100644 index 1bcfdb16f9eb..000000000000 --- a/Documentation/Books/Manual/Deployment/Docker/README.md +++ /dev/null @@ -1,17 +0,0 @@ -Deploying ArangoDB using Docker -=============================== - -**Single instance:** - -- [Manually created Docker containers](../SingleInstance/ManualStart.md#manual-start-in-docker) -- [ArangoDB _Starter_ using Docker containers](../SingleInstance/UsingTheStarter.md#using-the-arangodb-starter-in-docker) - -**Active Failover:** - -- [Manually created Docker containers](../ActiveFailover/ManualStart.md#manual-start-in-docker) -- [ArangoDB _Starter_ using Docker containers](../ActiveFailover/UsingTheStarter.md#using-the-arangodb-starter-in-docker) - -**Cluster:** - -- [Manually created Docker containers](../Cluster/ManualStart.md#manual-start-in-docker) -- [ArangoDB _Starter_ using Docker containers](../Cluster/UsingTheStarter.md#using-the-arangodb-starter-in-docker) diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Authentication.md b/Documentation/Books/Manual/Deployment/Kubernetes/Authentication.md deleted file mode 100644 index 75ea59bef68f..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Authentication.md +++ /dev/null @@ -1,19 +0,0 @@ - -# Authentication - -The ArangoDB Kubernetes Operator will by default create ArangoDB deployments -that require authentication to access the database. - -It uses a single JWT secret (stored in a Kubernetes secret) -to provide *super-user* access between all servers of the deployment -as well as access from the ArangoDB Operator to the deployment. - -To disable authentication, set `spec.auth.jwtSecretName` to `None`. - -Initially the deployment is accessible through the web user-interface and -APIs, using the user `root` with an empty password. -Make sure to change this password immediately after starting the deployment! 
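One way to do that, sketched below, is via `arangosh`; the endpoint and the new password are placeholders and have to be replaced with the address on which your deployment is exposed and a password of your choice:

```bash
# Set a new password for the root user (the initial root password is empty).
# Use tcp:// instead of ssl:// if TLS has been disabled for the deployment.
arangosh \
  --server.endpoint ssl://<deployment-address>:8529 \
  --server.username root \
  --server.password "" \
  --javascript.execute-string 'require("@arangodb/users").update("root", "<new-password>");'
```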
- -## See also - -- [Secure connections (TLS)](./Tls.md) diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/ConfigAndSecrets.md b/Documentation/Books/Manual/Deployment/Kubernetes/ConfigAndSecrets.md deleted file mode 100644 index 7f57e56d662d..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/ConfigAndSecrets.md +++ /dev/null @@ -1,41 +0,0 @@ - -# Configuration & secrets - -An ArangoDB cluster has lots of configuration options. -Some will be supported directly in the ArangoDB Operator, -others will have to specified separately. - -## Built-in options - -All built-in options are passed to ArangoDB servers via commandline -arguments configured in the Pod-spec. - -## Other configuration options - -All commandline options of `arangod` (and `arangosync`) are available -by adding options to the `spec..args` list of a group -of servers. - -These arguments are added to th commandline created for these servers. - -## Secrets - -The ArangoDB cluster needs several secrets such as JWT tokens -TLS certificates and so on. - -All these secrets are stored as Kubernetes Secrets and passed to -the applicable Pods as files, mapped into the Pods filesystem. - -The name of the secret is specified in the custom resource. -For example: - -```yaml -apiVersion: "cluster.arangodb.com/v1alpha" -kind: "Cluster" -metadata: - name: "example-arangodb-cluster" -spec: - mode: Cluster - auth: - jwtSecretName: -``` diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Dashboards.md b/Documentation/Books/Manual/Deployment/Kubernetes/Dashboards.md deleted file mode 100644 index 09da64d322af..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Dashboards.md +++ /dev/null @@ -1,74 +0,0 @@ - -# Operator Dashboards - -The ArangoDB Kubernetes Operator can create a dashboard for each type of -resource it supports. These dashboards are intended to give an overview of -the created resources, their state and instructions on how to modify those resources. - -The dashboards do not provide direct means to modify the resources. -All modifications are done using `kubectl` commands (which are provided by the dashboards) -so the standard security of your Kubernetes cluster is not bypassed. - -## Exposing the dashboards - -For each resource type (deployment, deployment replication & local storage) operator -a `Service` is created that serves the dashboard internally in the Kubernetes cluster. -To expose a dashboard outside the Kubernetes cluster, run a `kubecty expose` -command like this: - -```bash -kubectl expose service --type=LoadBalancer \ - --port=8528 --target-port=8528 \ - --name= --namespace= -``` - -Replace `` with: - -- `arango-deployment-operator` for the ArangoDeployment operator dashboard. -- `arango-deployment-replication-operator` for the ArangoDeploymentReplication - operator dashboard. -- `arango-storage-operator` for the ArangoLocalStorage operator dashboard. - (use 'kube-system' namespace) - -Replace `` with the name of the namespace that the operator is in. -This will often be `default`. - -This will create an additional `Service` of type `LoadBalancer` that copies -the selector from the existing `Service`. -If your Kubernetes cluster does not support loadbalancers, -use `--type=NodePort` instead. - -Run the following command to inspect your new service and look for the -loadbalancer IP/host address (or nodeport). 
- -```bash -kubectl get service --namespace= -``` - -This will result in something like this: - -```bash -$ kubectl get service arango-storage-operator-lb --namespace=kube-system -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -arango-storage-operator-lb LoadBalancer 10.103.30.24 192.168.31.11 8528:30655/TCP 1d -``` - -## Authentication - -While the dashboards do not provide any means to directly modify resources, -they still show sensitive information (e.g. TLS certificates). -Therefore the dashboards require a username+password for authentications. - -The username+password pair is configured in a generic Kubernetes `Secret` named `arangodb-operator-dashboard`, found in the namespace where the operator runs. - -To create such a secret, run this: - -```bash -kubectl create secret generic \ - arangodb-operator-dashboard --namespace= \ - --from-literal=username= \ - --from-literal=password= -``` - -Until such a `Secret` is found, the operator will respond with a status `401` -to any request related to the dashboard. diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentReplicationResource.md b/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentReplicationResource.md deleted file mode 100644 index e8f59f20f063..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentReplicationResource.md +++ /dev/null @@ -1,208 +0,0 @@ - -# ArangoDeploymentReplication Custom Resource - -The ArangoDB Replication Operator creates and maintains ArangoDB -`arangosync` configurations in a Kubernetes cluster, given a replication specification. -This replication specification is a `CustomResource` following -a `CustomResourceDefinition` created by the operator. - -Example minimal replication definition for 2 ArangoDB cluster with sync in the same Kubernetes cluster: - -```yaml -apiVersion: "replication.database.arangodb.com/v1alpha" -kind: "ArangoDeploymentReplication" -metadata: - name: "replication-from-a-to-b" -spec: - source: - deploymentName: cluster-a - auth: - keyfileSecretName: cluster-a-sync-auth - destination: - deploymentName: cluster-b -``` - -This definition results in: - -- the arangosync `SyncMaster` in deployment `cluster-b` is called to configure a synchronization - from the syncmasters in `cluster-a` to the syncmasters in `cluster-b`, - using the client authentication certificate stored in `Secret` `cluster-a-sync-auth`. - To access `cluster-a`, the JWT secret found in the deployment of `cluster-a` is used. - To access `cluster-b`, the JWT secret found in the deployment of `cluster-b` is used. - -Example replication definition for replicating from a source that is outside the current Kubernetes cluster -to a destination that is in the same Kubernetes cluster: - -```yaml -apiVersion: "replication.database.arangodb.com/v1alpha" -kind: "ArangoDeploymentReplication" -metadata: - name: "replication-from-a-to-b" -spec: - source: - masterEndpoint: ["https://163.172.149.229:31888", "https://51.15.225.110:31888", "https://51.15.229.133:31888"] - auth: - keyfileSecretName: cluster-a-sync-auth - tls: - caSecretName: cluster-a-sync-ca - destination: - deploymentName: cluster-b -``` - -This definition results in: - -- the arangosync `SyncMaster` in deployment `cluster-b` is called to configure a synchronization - from the syncmasters located at the given list of endpoint URLs to the syncmasters `cluster-b`, - using the client authentication certificate stored in `Secret` `cluster-a-sync-auth`. 
- To access `cluster-a`, the keyfile (containing a client authentication certificate) is used. - To access `cluster-b`, the JWT secret found in the deployment of `cluster-b` is used. - -## Specification reference - -Below you'll find all settings of the `ArangoDeploymentReplication` custom resource. - -### `spec.source.deploymentName: string` - -This setting specifies the name of an `ArangoDeployment` resource that runs a cluster -with sync enabled. - -This cluster configured as the replication source. - -### `spec.source.masterEndpoint: []string` - -This setting specifies zero or more master endpoint URLs of the source cluster. - -Use this setting if the source cluster is not running inside a Kubernetes cluster -that is reachable from the Kubernetes cluster the `ArangoDeploymentReplication` resource is deployed in. - -Specifying this setting and `spec.source.deploymentName` at the same time is not allowed. - -### `spec.source.auth.keyfileSecretName: string` - -This setting specifies the name of a `Secret` containing a client authentication certificate called `tls.keyfile` used to authenticate -with the SyncMaster at the specified source. - -If `spec.source.auth.userSecretName` has not been set, -the client authentication certificate found in the secret with this name is also used to configure -the synchronization and fetch the synchronization status. - -This setting is required. - -### `spec.source.auth.userSecretName: string` - -This setting specifies the name of a `Secret` containing a `username` & `password` used to authenticate -with the SyncMaster at the specified source in order to configure synchronization and fetch synchronization status. - -The user identified by the username must have write access in the `_system` database of the source ArangoDB cluster. - -### `spec.source.tls.caSecretName: string` - -This setting specifies the name of a `Secret` containing a TLS CA certificate `ca.crt` used to verify -the TLS connection created by the SyncMaster at the specified source. - -This setting is required, unless `spec.source.deploymentName` has been set. - -### `spec.destination.deploymentName: string` - -This setting specifies the name of an `ArangoDeployment` resource that runs a cluster -with sync enabled. - -This cluster configured as the replication destination. - -### `spec.destination.masterEndpoint: []string` - -This setting specifies zero or more master endpoint URLs of the destination cluster. - -Use this setting if the destination cluster is not running inside a Kubernetes cluster -that is reachable from the Kubernetes cluster the `ArangoDeploymentReplication` resource is deployed in. - -Specifying this setting and `spec.destination.deploymentName` at the same time is not allowed. - -### `spec.destination.auth.keyfileSecretName: string` - -This setting specifies the name of a `Secret` containing a client authentication certificate called `tls.keyfile` used to authenticate -with the SyncMaster at the specified destination. - -If `spec.destination.auth.userSecretName` has not been set, -the client authentication certificate found in the secret with this name is also used to configure -the synchronization and fetch the synchronization status. - -This setting is required, unless `spec.destination.deploymentName` or `spec.destination.auth.userSecretName` has been set. - -Specifying this setting and `spec.destination.userSecretName` at the same time is not allowed. 
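As a sketch of how such a keyfile `Secret` can be provided (secret name, namespace and file path are examples), an existing ArangoSync client authentication keyfile can be stored under the expected `tls.keyfile` data field with `kubectl`:

```bash
# Store an existing client authentication keyfile in a Secret under the
# "tls.keyfile" data field referenced by the keyfileSecretName settings
kubectl create secret generic cluster-a-sync-auth \
  --namespace=default \
  --from-file=tls.keyfile=./client-auth.keyfile
```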
- -### `spec.destination.auth.userSecretName: string` - -This setting specifies the name of a `Secret` containing a `username` & `password` used to authenticate -with the SyncMaster at the specified destination in order to configure synchronization and fetch synchronization status. - -The user identified by the username must have write access in the `_system` database of the destination ArangoDB cluster. - -Specifying this setting and `spec.destination.keyfileSecretName` at the same time is not allowed. - -### `spec.destination.tls.caSecretName: string` - -This setting specifies the name of a `Secret` containing a TLS CA certificate `ca.crt` used to verify -the TLS connection created by the SyncMaster at the specified destination. - -This setting is required, unless `spec.destination.deploymentName` has been set. - -## Authentication details - -The authentication settings in a `ArangoDeploymentReplication` resource are used for two distinct purposes. - -The first use is the authentication of the syncmasters at the destination with the syncmasters at the source. -This is always done using a client authentication certificate which is found in a `tls.keyfile` field -in a secret identified by `spec.source.auth.keyfileSecretName`. - -The second use is the authentication of the ArangoDB Replication operator with the syncmasters at the source -or destination. These connections are made to configure synchronization, stop configuration and fetch the status -of the configuration. -The method used for this authentication is derived as follows (where `X` is either `source` or `destination`): - -- If `spec.X.userSecretName` is set, the username + password found in the `Secret` identified by this name is used. -- If `spec.X.keyfileSecretName` is set, the client authentication certificate (keyfile) found in the `Secret` identifier by this name is used. -- If `spec.X.deploymentName` is set, the JWT secret found in the deployment is used. - -## Creating client authentication certificate keyfiles - -The client authentication certificates needed for the `Secrets` identified by `spec.source.auth.keyfileSecretName` & `spec.destination.auth.keyfileSecretName` -are normal ArangoDB keyfiles that can be created by the `arangosync create client-auth keyfile` command. -In order to do so, you must have access to the client authentication CA of the source/destination. - -If the client authentication CA at the source/destination also contains a private key (`ca.key`), the ArangoDeployment operator -can be used to create such a keyfile for you, without the need to have `arangosync` installed locally. -Read the following paragraphs for instructions on how to do that. - -## Creating and using access packages - -An access package is a YAML file that contains: - -- A client authentication certificate, wrapped in a `Secret` in a `tls.keyfile` data field. -- A TLS certificate authority public key, wrapped in a `Secret` in a `ca.crt` data field. - -The format of the access package is such that it can be inserted into a Kubernetes cluster using the standard `kubectl` tool. - -To create an access package that can be used to authenticate with the ArangoDB SyncMasters of an `ArangoDeployment`, -add a name of a non-existing `Secret` to the `spec.sync.externalAccess.accessPackageSecretNames` field of the `ArangoDeployment`. 
-In response, a `Secret` is created in that Kubernetes cluster, with the given name, that contains a `accessPackage.yaml` data field -that contains a Kubernetes resource specification that can be inserted into the other Kubernetes cluster. - -The process for creating and using an access package for authentication at the source cluster is as follows: - -- Edit the `ArangoDeployment` resource of the source cluster, set `spec.sync.externalAccess.accessPackageSecretNames` to `["my-access-package"]` -- Wait for the `ArangoDeployment` operator to create a `Secret` named `my-access-package`. -- Extract the access package from the Kubernetes source cluster using: - -```bash -kubectl get secret my-access-package --template='{{index .data "accessPackage.yaml"}}' | base64 -D > accessPackage.yaml -``` - -- Insert the secrets found in the access package in the Kubernetes destination cluster using: - -```bash -kubectl apply -f accessPackage.yaml -``` - -As a result, the destination Kubernetes cluster will have 2 additional `Secrets`. One contains a client authentication certificate -formatted as a keyfile. Another contains the public key of the TLS CA certificate of the source cluster. diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md b/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md deleted file mode 100644 index cd6c0f4c1f48..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md +++ /dev/null @@ -1,422 +0,0 @@ - -# ArangoDeployment Custom Resource - -The ArangoDB Deployment Operator creates and maintains ArangoDB deployments -in a Kubernetes cluster, given a deployment specification. -This deployment specification is a `CustomResource` following -a `CustomResourceDefinition` created by the operator. - -Example minimal deployment definition of an ArangoDB database cluster: - -```yaml -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "example-arangodb-cluster" -spec: - mode: Cluster -``` - -Example more elaborate deployment definition: - -```yaml -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "example-arangodb-cluster" -spec: - mode: Cluster - environment: Production - agents: - count: 3 - args: - - --log.level=debug - resources: - requests: - storage: 8Gi - storageClassName: ssd - dbservers: - count: 5 - resources: - requests: - storage: 80Gi - storageClassName: ssd - coordinators: - count: 3 - image: "arangodb/arangodb:3.3.4" -``` - -## Specification reference - -Below you'll find all settings of the `ArangoDeployment` custom resource. -Several settings are for various groups of servers. These are indicated -with `` where `` can be any of: - -- `agents` for all agents of a `Cluster` or `ActiveFailover` pair. -- `dbservers` for all dbservers of a `Cluster`. -- `coordinators` for all coordinators of a `Cluster`. -- `single` for all single servers of a `Single` instance or `ActiveFailover` pair. -- `syncmasters` for all syncmasters of a `Cluster`. -- `syncworkers` for all syncworkers of a `Cluster`. - -### `spec.mode: string` - -This setting specifies the type of deployment you want to create. -Possible values are: - -- `Cluster` (default) Full cluster. Defaults to 3 agents, 3 dbservers & 3 coordinators. -- `ActiveFailover` Active-failover single pair. Defaults to 3 agents and 2 single servers. -- `Single` Single server only (note this does not provide high availability or reliability). 
- -This setting cannot be changed after the deployment has been created. - -### `spec.environment: string` - -This setting specifies the type of environment in which the deployment is created. -Possible values are: - -- `Development` (default) This value optimizes the deployment for development - use. It is possible to run a deployment on a small number of nodes (e.g. minikube). -- `Production` This value optimizes the deployment for production use. - It puts required affinity constraints on all pods to avoid agents & dbservers - from running on the same machine. - -### `spec.image: string` - -This setting specifies the docker image to use for all ArangoDB servers. -In a `development` environment this setting defaults to `arangodb/arangodb:latest`. -For `production` environments this is a required setting without a default value. -It is highly recommend to use explicit version (not `latest`) for production -environments. - -### `spec.imagePullPolicy: string` - -This setting specifies the pull policy for the docker image to use for all ArangoDB servers. -Possible values are: - -- `IfNotPresent` (default) to pull only when the image is not found on the node. -- `Always` to always pull the image before using it. - -### `spec.storageEngine: string` - -This setting specifies the type of storage engine used for all servers -in the cluster. -Possible values are: - -- `MMFiles` To use the MMFiles storage engine. -- `RocksDB` (default) To use the RocksDB storage engine. - -This setting cannot be changed after the cluster has been created. - -### `spec.downtimeAllowed: bool` - -This setting is used to allow automatic reconciliation actions that yield -some downtime of the ArangoDB deployment. -When this setting is set to `false` (the default), no automatic action that -may result in downtime is allowed. -If the need for such an action is detected, an event is added to the `ArangoDeployment`. - -Once this setting is set to `true`, the automatic action is executed. - -Operations that may result in downtime are: - -- Rotating TLS CA certificate - -Note: It is still possible that there is some downtime when the Kubernetes -cluster is down, or in a bad state, irrespective of the value of this setting. - -### `spec.rocksdb.encryption.keySecretName` - -This setting specifies the name of a Kubernetes `Secret` that contains -an encryption key used for encrypting all data stored by ArangoDB servers. -When an encryption key is used, encryption of the data in the cluster is enabled, -without it encryption is disabled. -The default value is empty. - -This requires the Enterprise version. - -The encryption key cannot be changed after the cluster has been created. - -The secret specified by this setting, must have a data field named 'key' containing -an encryption key that is exactly 32 bytes long. - -### `spec.externalAccess.type: string` - -This setting specifies the type of `Service` that will be created to provide -access to the ArangoDB deployment from outside the Kubernetes cluster. -Possible values are: - -- `None` To limit access to application running inside the Kubernetes cluster. -- `LoadBalancer` To create a `Service` of type `LoadBalancer` for the ArangoDB deployment. -- `NodePort` To create a `Service` of type `NodePort` for the ArangoDB deployment. -- `Auto` (default) To create a `Service` of type `LoadBalancer` and fallback to a `Service` or type `NodePort` when the - `LoadBalancer` is not assigned an IP address. 
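To find out which kind of `Service` was created and on which address the deployment is reachable, the `Services` in the deployment's namespace can be inspected (a sketch; the namespace is an example):

```bash
# Look for the Service exposing the ArangoDB deployment: check EXTERNAL-IP
# for type LoadBalancer and PORT(S) for type NodePort
kubectl get services --namespace=default
```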
- -### `spec.externalAccess.loadBalancerIP: string` - -This setting specifies the IP used to for the LoadBalancer to expose the ArangoDB deployment on. -This setting is used when `spec.externalAccess.type` is set to `LoadBalancer` or `Auto`. - -If you do not specify this setting, an IP will be chosen automatically by the load-balancer provisioner. - -### `spec.externalAccess.nodePort: int` - -This setting specifies the port used to expose the ArangoDB deployment on. -This setting is used when `spec.externalAccess.type` is set to `NodePort` or `Auto`. - -If you do not specify this setting, a random port will be chosen automatically. - -### `spec.externalAccess.advertisedEndpoint: string` - -This setting specifies the advertised endpoint for all coordinators. - -### `spec.auth.jwtSecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -the JWT token used for accessing all ArangoDB servers. -When no name is specified, it defaults to `-jwt`. -To disable authentication, set this value to `None`. - -If you specify a name of a `Secret`, that secret must have the token -in a data field named `token`. - -If you specify a name of a `Secret` that does not exist, a random token is created -and stored in a `Secret` with given name. - -Changing a JWT token results in stopping the entire cluster -and restarting it. - -### `spec.tls.caSecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -a standard CA certificate + private key used to sign certificates for individual -ArangoDB servers. -When no name is specified, it defaults to `-ca`. -To disable authentication, set this value to `None`. - -If you specify a name of a `Secret` that does not exist, a self-signed CA certificate + key is created -and stored in a `Secret` with given name. - -The specified `Secret`, must contain the following data fields: - -- `ca.crt` PEM encoded public key of the CA certificate -- `ca.key` PEM encoded private key of the CA certificate - -### `spec.tls.altNames: []string` - -This setting specifies a list of alternate names that will be added to all generated -certificates. These names can be DNS names or email addresses. -The default value is empty. - -### `spec.tls.ttl: duration` - -This setting specifies the time to live of all generated -server certificates. -The default value is `2160h` (about 3 month). - -When the server certificate is about to expire, it will be automatically replaced -by a new one and the affected server will be restarted. - -Note: The time to live of the CA certificate (when created automatically) -will be set to 10 years. - -### `spec.sync.enabled: bool` - -This setting enables/disables support for data center 2 data center -replication in the cluster. When enabled, the cluster will contain -a number of `syncmaster` & `syncworker` servers. -The default value is `false`. - -### `spec.sync.externalAccess.type: string` - -This setting specifies the type of `Service` that will be created to provide -access to the ArangoSync syncMasters from outside the Kubernetes cluster. -Possible values are: - -- `None` To limit access to applications running inside the Kubernetes cluster. -- `LoadBalancer` To create a `Service` of type `LoadBalancer` for the ArangoSync SyncMasters. -- `NodePort` To create a `Service` of type `NodePort` for the ArangoSync SyncMasters. -- `Auto` (default) To create a `Service` of type `LoadBalancer` and fallback to a `Service` or type `NodePort` when the - `LoadBalancer` is not assigned an IP address. 
- -Note that when you specify a value of `None`, a `Service` will still be created, but of type `ClusterIP`. - -### `spec.sync.externalAccess.loadBalancerIP: string` - -This setting specifies the IP used for the LoadBalancer to expose the ArangoSync SyncMasters on. -This setting is used when `spec.sync.externalAccess.type` is set to `LoadBalancer` or `Auto`. - -If you do not specify this setting, an IP will be chosen automatically by the load-balancer provisioner. - -### `spec.sync.externalAccess.nodePort: int` - -This setting specifies the port used to expose the ArangoSync SyncMasters on. -This setting is used when `spec.sync.externalAccess.type` is set to `NodePort` or `Auto`. - -If you do not specify this setting, a random port will be chosen automatically. - -### `spec.sync.externalAccess.masterEndpoint: []string` - -This setting specifies the master endpoint(s) advertised by the ArangoSync SyncMasters. -If not set, this setting defaults to: - -- If `spec.sync.externalAccess.loadBalancerIP` is set, it defaults to `https://:<8629>`. -- Otherwise it defaults to `https://:<8629>`. - -### `spec.sync.externalAccess.accessPackageSecretNames: []string` - -This setting specifies the names of zero of more `Secrets` that will be created by the deployment -operator containing "access packages". An access package contains those `Secrets` that are needed -to access the SyncMasters of this `ArangoDeployment`. - -By removing a name from this setting, the corresponding `Secret` is also deleted. -Note that to remove all access packages, leave an empty array in place (`[]`). -Completely removing the setting results in not modifying the list. - -See [the `ArangoDeploymentReplication` specification](./DeploymentReplicationResource.md) for more information -on access packages. - -### `spec.sync.auth.jwtSecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -the JWT token used for accessing all ArangoSync master servers. -When not specified, the `spec.auth.jwtSecretName` value is used. - -If you specify a name of a `Secret` that does not exist, a random token is created -and stored in a `Secret` with given name. - -### `spec.sync.auth.clientCASecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -a PEM encoded CA certificate used for client certificate verification -in all ArangoSync master servers. -This is a required setting when `spec.sync.enabled` is `true`. -The default value is empty. - -### `spec.sync.mq.type: string` - -This setting sets the type of message queue used by ArangoSync. -Possible values are: - -- `Direct` (default) for direct HTTP connections between the 2 data centers. - -### `spec.sync.tls.caSecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -a standard CA certificate + private key used to sign certificates for individual -ArangoSync master servers. - -When no name is specified, it defaults to `-sync-ca`. - -If you specify a name of a `Secret` that does not exist, a self-signed CA certificate + key is created -and stored in a `Secret` with given name. - -The specified `Secret`, must contain the following data fields: - -- `ca.crt` PEM encoded public key of the CA certificate -- `ca.key` PEM encoded private key of the CA certificate - -### `spec.sync.tls.altNames: []string` - -This setting specifies a list of alternate names that will be added to all generated -certificates. These names can be DNS names or email addresses. -The default value is empty. 
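
If you want to provide your own CA instead of having the operator generate a self-signed one, the required `Secret` can be created from existing PEM files, for example (a sketch; the secret name `my-sync-ca`, the namespace and the local file names are placeholders):

```bash
kubectl create secret generic my-sync-ca \
  --namespace=default \
  --from-file=ca.crt=./ca.crt \
  --from-file=ca.key=./ca.key
```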
- -### `spec.sync.monitoring.tokenSecretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -the bearer token used for accessing all monitoring endpoints of all ArangoSync -servers. -When not specified, no monitoring token is used. -The default value is empty. - -### `spec.disableIPv6: bool` - -This setting prevents the use of IPv6 addresses by ArangoDB servers. -The default is `false`. - -This setting cannot be changed after the deployment has been created. - -### `spec.license.secretName: string` - -This setting specifies the name of a kubernetes `Secret` that contains -the license key token used for enterprise images. This value is not used for -the community edition. - -### `spec..count: number` - -This setting specifies the number of servers to start for the given group. -For the agent group, this value must be a positive, odd number. -The default value is `3` for all groups except `single` (there the default is `1` -for `spec.mode: Single` and `2` for `spec.mode: ActiveFailover`). - -For the `syncworkers` group, it is highly recommended to use the same number -as for the `dbservers` group. - -### `spec..minCount: number` - -Specifies a minimum for the count of servers. If set, a specification is invalid if `count < minCount`. - -### `spec..maxCount: number` - -Specifies a maximum for the count of servers. If set, a specification is invalid if `count > maxCount`. - -### `spec..args: []string` - -This setting specifies additional commandline arguments passed to all servers of this group. -The default value is an empty array. - -### `spec..resources.requests.cpu: cpuUnit` - -This setting specifies the amount of CPU requested by server of this group. - -See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details. - -### `spec..resources.requests.memory: memoryUnit` - -This setting specifies the amount of memory requested by server of this group. - -See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details. - -### `spec..resources.requests.storage: storageUnit` - -This setting specifies the amount of storage required for each server of this group. -The default value is `8Gi`. - -This setting is not available for group `coordinators`, `syncmasters` & `syncworkers` -because servers in these groups do not need persistent storage. - -### `spec..serviceAccountName: string` - -This setting specifies the `serviceAccountName` for the `Pods` created -for each server of this group. - -Using an alternative `ServiceAccount` is typically used to separate access rights. -The ArangoDB deployments do not require any special rights. - -### `spec..storageClassName: string` - -This setting specifies the `storageClass` for the `PersistentVolume`s created -for each server of this group. - -This setting is not available for group `coordinators`, `syncmasters` & `syncworkers` -because servers in these groups do not need persistent storage. - -### `spec..tolerations: []Toleration` - -This setting specifies the `tolerations` for the `Pod`s created -for each server of this group. - -By default, suitable tolerations are set for the following keys with the `NoExecute` effect: - -- `node.kubernetes.io/not-ready` -- `node.kubernetes.io/unreachable` -- `node.alpha.kubernetes.io/unreachable` (will be removed in future version) - -For more information on tolerations, consult the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). 
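
As a sketch of how the per-group settings combine in practice (all values are examples only; the group names follow the `agents`, `dbservers`, `coordinators`, ... scheme used above):

```yaml
apiVersion: "database.arangodb.com/v1alpha"
kind: "ArangoDeployment"
metadata:
  name: "example-sized-cluster"
spec:
  mode: Cluster
  dbservers:
    count: 5
    args:
      - --log.level=info
    resources:
      requests:
        cpu: "2"
        memory: 4Gi
        storage: 80Gi
    storageClassName: my-local-ssd
  coordinators:
    count: 3
    resources:
      requests:
        cpu: "1"
        memory: 2Gi
```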
- -### `spec..nodeSelector: map[string]string` - -This setting specifies a set of labels to be used as `nodeSelector` for Pods of this node. - -For more information on node selectors, consult the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/). diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Drain.md b/Documentation/Books/Manual/Deployment/Kubernetes/Drain.md deleted file mode 100644 index 42cc3e62ba38..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Drain.md +++ /dev/null @@ -1,456 +0,0 @@ - -# Draining Kubernetes nodes - -{% hint 'danger' %} -If Kubernetes nodes with ArangoDB pods on them are drained without care -data loss can occur! The recommended procedure is described below. -{% endhint %} - -For maintenance work in k8s it is sometimes necessary to drain a k8s node, -which means removing all pods from it. Kubernetes offers a standard API -for this and our operator supports this - to the best of its ability. - -Draining nodes is easy enough for stateless services, which can simply be -re-launched on any other node. However, for a stateful service this -operation is more difficult, and as a consequence more costly and there -are certain risks involved, if the operation is not done carefully -enough. To put it simply, the operator must first move all the data -stored on the node (which could be in a locally attached disk) to -another machine, before it can shut down the pod gracefully. Moving data -takes time, and even after the move, the distributed system ArangoDB has -to recover from this change, for example by ensuring data synchronicity -between the replicas in their new location. - -Therefore, a systematic drain of all k8s nodes in sequence has to follow -a careful procedure, in particular to ensure that ArangoDB is ready to -move to the next step. This is necessary to avoid catastrophic data -loss, and is simply the price one pays for running a stateful service. - -## Anatomy of a drain procedure in k8s: the grace period - -When a `kubectl drain` operation is triggered for a node, k8s first -checks if there are any pods with local data on disk. Our ArangoDB pods have -this property (the _Coordinators_ do use `EmptyDir` volumes, and _Agents_ -and _DBServers_ could have persistent volumes which are actually stored on -a locally attached disk), so one has to override this with the -`--delete-local-data=true` option. - -Furthermore, quite often, the node will contain pods which are managed -by a `DaemonSet` (which is not the case for ArangoDB), which makes it -necessary to override this check with the `--ignore-daemonsets=true` -option. - -Finally, it is checked if the node has any pods which are not managed by -anything, either by k8s itself (`ReplicationController`, `ReplicaSet`, -`Job`, `DaemonSet` or `StatefulSet`) or by an operator. If this is the -case, the drain operation will be refused, unless one uses the option -`--force=true`. Since the ArangoDB operator manages our pods, we do not -have to use this option for ArangoDB, but you might have to use it for -other pods. - -If all these checks have been overcome, k8s proceeds as follows: All -pods are notified about this event and are put into a `Terminating` -state. During this time, they have a chance to take action, or indeed -the operator managing them has. In particular, although the pods get -termination notices, they can keep running until the operator has -removed all _finalizers_. 
This gives the operator a chance to sort out -things, for example in our case to move data away from the pod. - -However, there is a limit to this tolerance by k8s, and that is the -grace period. If the grace period has passed but the pod has not -actually terminated, then it is killed the hard way. If this happens, -the operator has no chance but to remove the pod, drop its persistent -volume claim and persistent volume. This will obviously lead to a -failure incident in ArangoDB and must be handled by fail-over management. -Therefore, **this event should be avoided**. - -## Things to check in ArangoDB before a node drain - -There are basically two things one should check in an ArangoDB cluster -before a node drain operation can be started: - - 1. All cluster nodes are up and running and healthy. - 2. For all collections and shards all configured replicas are in sync. - -{% hint 'warning' %} -If any cluster node is unhealthy, there is an increased risk that the -system does not have enough resources to cope with a failure situation. - -If any shard replicas are not currently in sync, then there is a serious -risk that the cluster is currently not as resilient as expected. -{% endhint %} - -One possibility to verify these two things is via the ArangoDB web interface. -Node health can be monitored in the _Overview_ tab under _NODES_: - -![Cluster Health Screen](./HealthyCluster.png) - -**Check that all nodes are green** and that there is **no node error** in the -top right corner. - -As to the shards being in sync, see the _Shards_ tab under _NODES_: - -![Shard Screen](./ShardsInSync.png) - -**Check that all collections have a green check mark** on the right side. -If any collection does not have such a check mark, you can click on the -collection and see the details about shards. Please keep in -mind that this has to be done **for each database** separately! - -Obviously, this might be tedious and calls for automation. Therefore, there -are APIs for this. The first one is [Cluster Health](../../../HTTP/Cluster/Health.html): - -``` -POST /_admin/cluster/health -``` - -… which returns a JSON document looking like this: - -```JSON -{ - "Health": { - "CRDN-rxtu5pku": { - "Endpoint": "ssl://my-arangodb-cluster-coordinator-rxtu5pku.my-arangodb-cluster-int.default.svc:8529", - "LastAckedTime": "2019-02-20T08:09:22Z", - "SyncTime": "2019-02-20T08:09:21Z", - "Version": "3.4.2-1", - "Engine": "rocksdb", - "ShortName": "Coordinator0002", - "Timestamp": "2019-02-20T08:09:22Z", - "Status": "GOOD", - "SyncStatus": "SERVING", - "Host": "my-arangodb-cluster-coordinator-rxtu5pku.my-arangodb-cluster-int.default.svc", - "Role": "Coordinator", - "CanBeDeleted": false - }, - "PRMR-wbsq47rz": { - "LastAckedTime": "2019-02-21T09:14:24Z", - "Endpoint": "ssl://my-arangodb-cluster-dbserver-wbsq47rz.my-arangodb-cluster-int.default.svc:8529", - "SyncTime": "2019-02-21T09:14:24Z", - "Version": "3.4.2-1", - "Host": "my-arangodb-cluster-dbserver-wbsq47rz.my-arangodb-cluster-int.default.svc", - "Timestamp": "2019-02-21T09:14:24Z", - "Status": "GOOD", - "SyncStatus": "SERVING", - "Engine": "rocksdb", - "ShortName": "DBServer0006", - "Role": "DBServer", - "CanBeDeleted": false - }, - "AGNT-wrqmwpuw": { - "Endpoint": "ssl://my-arangodb-cluster-agent-wrqmwpuw.my-arangodb-cluster-int.default.svc:8529", - "Role": "Agent", - "CanBeDeleted": false, - "Version": "3.4.2-1", - "Engine": "rocksdb", - "Leader": "AGNT-oqohp3od", - "Status": "GOOD", - "LastAckedTime": 0.312 - }, - ... 
[some more entries, one for each instance] - }, - "ClusterId": "210a0536-fd28-46de-b77f-e8882d6d7078", - "error": false, - "code": 200 -} -``` - -Check that each instance has a `Status` field with the value `"GOOD"`. -Here is a shell command which makes this check easy, using the -[`jq` JSON pretty printer](https://stedolan.github.io/jq/): - -```bash -curl -k https://arangodb.9hoeffer.de:8529/_admin/cluster/health --user root: | jq . | grep '"Status"' | grep -v '"GOOD"' -``` - -For the shards being in sync there is the -[Cluster Inventory](../../../HTTP/Replications/ReplicationDump.html#return-cluster-inventory-of-collections-and-indexes) -API call: - -``` -POST /_db/_system/_api/replication/clusterInventory -``` - -… which returns a JSON body like this: - -```JSON -{ - "collections": [ - { - "parameters": { - "cacheEnabled": false, - "deleted": false, - "globallyUniqueId": "c2010061/", - "id": "2010061", - "isSmart": false, - "isSystem": false, - "keyOptions": { - "allowUserKeys": true, - "type": "traditional" - }, - "name": "c", - "numberOfShards": 6, - "planId": "2010061", - "replicationFactor": 2, - "shardKeys": [ - "_key" - ], - "shardingStrategy": "hash", - "shards": { - "s2010066": [ - "PRMR-vzeebvwf", - "PRMR-e6hbjob1" - ], - "s2010062": [ - "PRMR-e6hbjob1", - "PRMR-vzeebvwf" - ], - "s2010065": [ - "PRMR-e6hbjob1", - "PRMR-vzeebvwf" - ], - "s2010067": [ - "PRMR-vzeebvwf", - "PRMR-e6hbjob1" - ], - "s2010064": [ - "PRMR-vzeebvwf", - "PRMR-e6hbjob1" - ], - "s2010063": [ - "PRMR-e6hbjob1", - "PRMR-vzeebvwf" - ] - }, - "status": 3, - "type": 2, - "waitForSync": false - }, - "indexes": [], - "planVersion": 132, - "isReady": true, - "allInSync": true - }, - ... [more collections following] - ], - "views": [], - "tick": "38139421", - "state": "unused" -} -``` - -Check that for all collections the attributes `"isReady"` and `"allInSync"` -both have the value `true`. Note that it is necessary to do this for all -databases! - -Here is a shell command which makes this check easy: - -```bash -curl -k https://arangodb.9hoeffer.de:8529/_db/_system/_api/replication/clusterInventory --user root: | jq . | grep '"isReady"\|"allInSync"' | sort | uniq -c -``` - -If all these checks are performed and are okay, then it is safe to -continue with the clean out and drain procedure as described below. - -{% hint 'danger' %} -If there are some collections with `replicationFactor` set to -1, the system is not resilient and cannot tolerate the failure of even a -single server! One can still perform a drain operation in this case, but -if anything goes wrong, in particular if the grace period is chosen too -short and a pod is killed the hard way, data loss can happen. -{% endhint %} - -If all `replicationFactor`s of all collections are at least 2, then the -system can tolerate the failure of a single _DBserver_. If you have set -the `Environment` to `Production` in the specs of the ArangoDB -deployment, you will only ever have one _DBserver_ on each k8s node and -therefore the drain operation is relatively safe, even if the grace -period is chosen too small. - -Furthermore, we recommend to have one k8s node more than _DBservers_ in -you cluster, such that the deployment of a replacement _DBServer_ can -happen quickly and not only after the maintenance work on the drained -node has been completed. However, with the necessary care described -below, the procedure should also work without this. - -Finally, one should **not run a rolling upgrade or restart operation** -at the time of a node drain. 
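
To spot such under-replicated collections quickly, the cluster inventory output shown above can be filtered with `jq`, for example (same example endpoint as before; as with the other checks, this has to be repeated for every database):

```bash
curl -k https://arangodb.9hoeffer.de:8529/_db/_system/_api/replication/clusterInventory --user root: \
  | jq -r '.collections[] | select(.parameters.replicationFactor == 1) | .parameters.name'
```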
- -## Clean out a DBserver manually - -In this step we clean out a _DBServer_ manually, **before issuing the -`kubectl drain` command**. Previously, we have denoted this step as optional, -but for safety reasons, we consider it mandatory now, since it is near -impossible to choose the grace period long enough in a reliable way. - -Furthermore, if this step is not performed, we must choose -the grace period long enough to avoid any risk, as explained in the -previous section. However, this has a disadvantage which has nothing to -do with ArangoDB: We have observed, that some k8s internal services like -`fluentd` and some DNS services will always wait for the full grace -period to finish a node drain. Therefore, the node drain operation will -always take as long as the grace period. Since we have to choose this -grace period long enough for ArangoDB to move all data on the _DBServer_ -pod away to some other node, this can take a considerable amount of -time, depending on the size of the data you keep in ArangoDB. - -Therefore it is more time-efficient to perform the clean-out operation -beforehand. One can observe completion and as soon as it is completed -successfully, we can then issue the drain command with a relatively -small grace period and still have a nearly risk-free procedure. - -To clean out a _DBServer_ manually, we have to use this API: - -``` -POST /_admin/cluster/cleanOutServer -``` - -… and send as body a JSON document like this: - -```JSON -{"server":"DBServer0006"} -``` - -The value of the `"server"` attribute should be the name of the DBserver -which is the one in the pod which resides on the node that shall be -drained next. This uses the UI short name (`ShortName` in the -`/_admin/cluster/health` API), alternatively one can use the -internal name, which corresponds to the pod name. In our example, the -pod name is: - -``` -my-arangodb-cluster-prmr-wbsq47rz-5676ed -``` - -… where `my-arangodb-cluster` is the ArangoDB deployment name, therefore -the internal name of the _DBserver_ is `PRMR-wbsq47rz`. Note that `PRMR` -must be all capitals since pod names are always all lower case. So, we -could use the body: - -```JSON -{"server":"PRMR-wbsq47rz"} -``` - -You can use this command line to achieve this: - -```bash -curl -k https://arangodb.9hoeffer.de:8529/_admin/cluster/cleanOutServer --user root: -d '{"server":"PRMR-wbsq47rz"}' -``` - -The API call will return immediately with a body like this: - -```JSON -{"error":false,"id":"38029195","code":202} -``` - -The given `id` in this response can be used to query the outcome or -completion status of the clean out server job with this API: - -``` -GET /_admin/cluster/queryAgencyJob?id=38029195 -``` - -… which will return a body like this: - -```JSON -{ - "error": false, - "id": "38029195", - "status": "Pending", - "job": { - "timeCreated": "2019-02-21T10:42:14.727Z", - "server": "PRMR-wbsq47rz", - "timeStarted": "2019-02-21T10:42:15Z", - "type": "cleanOutServer", - "creator": "CRDN-rxtu5pku", - "jobId": "38029195" - }, - "code": 200 -} -``` - -Use this command line to check progress: - -```bash -curl -k https://arangodb.9hoeffer.de:8529/_admin/cluster/queryAgencyJob?id=38029195 --user root: -``` - -It indicates that the job is still ongoing (`"Pending"`). 
As soon as -the job has completed, the answer will be: - -```JSON -{ - "error": false, - "id": "38029195", - "status": "Finished", - "job": { - "timeCreated": "2019-02-21T10:42:14.727Z", - "server": "PRMR-e6hbjob1", - "jobId": "38029195", - "timeStarted": "2019-02-21T10:42:15Z", - "timeFinished": "2019-02-21T10:45:39Z", - "type": "cleanOutServer", - "creator": "CRDN-rxtu5pku" - }, - "code": 200 -} -``` - -From this moment on the _DBserver_ can no longer be used to move -shards to. At the same time, it will no longer hold any data of the -cluster. - -Now the drain operation involving a node with this pod on it is -completely risk-free, even with a small grace period. - -## Performing the drain - -After all above [checks before a node drain](#things-to-check-in-arangodb-before-a-node-drain) -and the [manual clean out of the DBServer](#clean-out-a-dbserver-manually) -have been done successfully, it is safe to perform the drain operation, similar to this command: - -```bash -kubectl drain gke-draintest-default-pool-394fe601-glts --delete-local-data --ignore-daemonsets --grace-period=300 -``` - -As described above, the options `--delete-local-data` for ArangoDB and -`--ignore-daemonsets` for other services have been added. A `--grace-period` of -300 seconds has been chosen because for this example we are confident that all the data on our _DBServer_ pod -can be moved to a different server within 5 minutes. Note that this is -**not saying** that 300 seconds will always be enough. Regardless of how -much data is stored in the pod, your mileage may vary, moving a terabyte -of data can take considerably longer! - -If the highly recommended step of -[cleaning out a DBserver manually](#clean-out-a-dbserver-manually) -has been performed beforehand, the grace period can easily be reduced to 60 -seconds - at least from the perspective of ArangoDB, since the server is already -cleaned out, so it can be dropped readily and there is still no risk. - -At the same time, this guarantees now that the drain is completed -approximately within a minute. - -## Things to check after a node drain - -After a node has been drained, there will usually be one of the -_DBservers_ gone from the cluster. As a replacement, another _DBServer_ has -been deployed on a different node, if there is a different node -available. If not, the replacement can only be deployed when the -maintenance work on the drained node has been completed and it is -uncordoned again. In this latter case, one should wait until the node is -back up and the replacement pod has been deployed there. - -After that, one should perform the same checks as described in -[things to check before a node drain](#things-to-check-in-arangodb-before-a-node-drain) -above. - -Finally, it is likely that the shard distribution in the "new" cluster -is not balanced out. In particular, the new _DBSserver_ is not automatically -used to store shards. We recommend to -[re-balance](../../Administration/Cluster/README.md#movingrebalancing-shards) the shard distribution, -either manually by moving shards or by using the _Rebalance Shards_ -button in the _Shards_ tab under _NODES_ in the web UI. This redistribution can take -some time again and progress can be monitored in the UI. - -After all this has been done, **another round of checks should be done** -before proceeding to drain the next node. 
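
If several nodes have to be drained one after another, waiting for the clean-out job to finish is the step that lends itself most to automation. A small polling loop, reusing the example endpoint and job id from above, might look like this (a sketch only):

```bash
JOBID=38029195
while true; do
  STATUS=$(curl -sk "https://arangodb.9hoeffer.de:8529/_admin/cluster/queryAgencyJob?id=$JOBID" \
    --user root: | jq -r .status)
  echo "cleanOutServer job $JOBID: $STATUS"
  if [ "$STATUS" = "Finished" ] || [ "$STATUS" = "Failed" ]; then break; fi
  sleep 10
done
```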
diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/DriverConfiguration.md b/Documentation/Books/Manual/Deployment/Kubernetes/DriverConfiguration.md deleted file mode 100644 index b498675bf2e9..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/DriverConfiguration.md +++ /dev/null @@ -1,135 +0,0 @@ - -# Configuring your driver for ArangoDB access - -In this chapter you'll learn how to configure a driver for accessing -an ArangoDB deployment in Kubernetes. - -The exact methods to configure a driver are specific to that driver. - -## Database endpoint(s) - -The endpoint(s) (or URLs) to communicate with is the most important -parameter your need to configure in your driver. - -Finding the right endpoints depend on wether your client application is running in -the same Kubernetes cluster as the ArangoDB deployment or not. - -### Client application in same Kubernetes cluster - -If your client application is running in the same Kubernetes cluster as -the ArangoDB deployment, you should configure your driver to use the -following endpoint: - -```text -https://..svc:8529 -``` - -Only if your deployment has set `spec.tls.caSecretName` to `None`, should -you use `http` instead of `https`. - -### Client application outside Kubernetes cluster - -If your client application is running outside the Kubernetes cluster in which -the ArangoDB deployment is running, your driver endpoint depends on the -external-access configuration of your ArangoDB deployment. - -If the external-access of the ArangoDB deployment is of type `LoadBalancer`, -then use the IP address of that `LoadBalancer` like this: - -```text -https://:8529 -``` - -If the external-access of the ArangoDB deployment is of type `NodePort`, -then use the IP address(es) of the `Nodes` of the Kubernetes cluster, -combined with the `NodePort` that is used by the external-access service. - -For example: - -```text -https://:30123 -``` - -You can find the type of external-access by inspecting the external-access `Service`. -To do so, run the following command: - -```bash -kubectl get service -n -ea -``` - -The output looks like this: - -```bash -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR -example-simple-cluster-ea LoadBalancer 10.106.175.38 192.168.10.208 8529:31890/TCP 1s app=arangodb,arango_deployment=example-simple-cluster,role=coordinator -``` - -In this case the external-access is of type `LoadBalancer` with a load-balancer IP address -of `192.168.10.208`. -This results in an endpoint of `https://192.168.10.208:8529`. - -## TLS settings - -As mentioned before the ArangoDB deployment managed by the ArangoDB operator -will use a secure (TLS) connection unless you set `spec.tls.caSecretName` to `None` -in your `ArangoDeployment`. - -When using a secure connection, you can choose to verify the server certificates -provides by the ArangoDB servers or not. - -If you want to verify these certificates, configure your driver with the CA certificate -found in a Kubernetes `Secret` found in the same namespace as the `ArangoDeployment`. - -The name of this `Secret` is stored in the `spec.tls.caSecretName` setting of -the `ArangoDeployment`. If you don't set this setting explicitly, it will be -set automatically. - -Then fetch the CA secret using the following command (or use a Kubernetes client library to fetch it): - -```bash -kubectl get secret -n --template='{{index .data "ca.crt"}}' | base64 -D > ca.crt -``` - -This results in a file called `ca.crt` containing a PEM encoded, x509 CA certificate. 
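
To check that the fetched CA certificate and the endpoint fit together before configuring your driver, you can for example request the server version with `curl` (using the load-balancer endpoint from the example above; adjust user and password to your deployment):

```bash
curl --cacert ca.crt https://192.168.10.208:8529/_api/version --user root:
```

If the generated server certificates do not include the IP address as an alternate name, use a DNS name that is covered by `spec.tls.altNames` instead.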
- -## Query requests - -For most client requests made by a driver, it does not matter if there is any -kind of load-balancer between your client application and the ArangoDB -deployment. - -{% hint 'info' %} -Note that even a simple `Service` of type `ClusterIP` already behaves as a -load-balancer. -{% endhint %} - -The exception to this is cursor-related requests made to an ArangoDB `Cluster` -deployment. The coordinator that handles an initial query request (that results -in a `Cursor`) will save some in-memory state in that coordinator, if the result -of the query is too big to be transfer back in the response of the initial -request. - -Follow-up requests have to be made to fetch the remaining data. These follow-up -requests must be handled by the same coordinator to which the initial request -was made. As soon as there is a load-balancer between your client application -and the ArangoDB cluster, it is uncertain which coordinator will receive the -follow-up request. - -ArangoDB will transparently forward any mismatched requests to the correct -coordinator, so the requests can be answered correctly without any additional -configuration. However, this incurs a small latency penalty due to the extra -request across the internal network. - -To prevent this uncertainty client-side, make sure to run your client -application in the same Kubernetes cluster and synchronize your endpoints before -making the initial query request. This will result in the use (by the driver) of -internal DNS names of all coordinators. A follow-up request can then be sent to -exactly the same coordinator. - -If your client application is running outside the Kubernetes cluster the easiest -way to work around it is by making sure that the query results are small enough -to be returned by a single request. When that is not feasible, it is also -possible to resolve this when the internal DNS names of your Kubernetes cluster -are exposed to your client application and the resulting IP addresses are -routable from your client application. To expose internal DNS names of your -Kubernetes cluster, your can use [CoreDNS](https://coredns.io). diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/HealthyCluster.png b/Documentation/Books/Manual/Deployment/Kubernetes/HealthyCluster.png deleted file mode 100644 index 2670e5bd2d77..000000000000 Binary files a/Documentation/Books/Manual/Deployment/Kubernetes/HealthyCluster.png and /dev/null differ diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Helm.md b/Documentation/Books/Manual/Deployment/Kubernetes/Helm.md deleted file mode 100644 index 7bd93e2bb6b2..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Helm.md +++ /dev/null @@ -1,92 +0,0 @@ - -# Using the ArangoDB Kubernetes Operator with Helm - -[`Helm`](https://www.helm.sh/) is a package manager for Kubernetes, which enables -you to install various packages (include the ArangoDB Kubernetes Operator) -into your Kubernetes cluster. - -The benefit of `helm` (in the context of the ArangoDB Kubernetes Operator) -is that it allows for a lot of flexibility in how you install the operator. -For example you can install the operator in a namespace other than -`default`. - -## Charts - -The ArangoDB Kubernetes Operator is contained in two `helm` charts: - -- `kube-arangodb` which contains the operator for the `ArangoDeployment` - and `ArangoDeploymentReplication` resource types. -- `kube-arangodb-storage` which contains the operator for the `ArangoLocalStorage` - resource type. 
- -The `kube-arangodb-storage` only has to be installed if your Kubernetes cluster -does not already provide `StorageClasses` that use locally attached SSDs. - -## Configurable values for ArangoDB Kubernetes Operator - -The following values can be configured when installing the -ArangoDB Kubernetes Operator with `helm`. - -Values are passed to `helm` using an `--set==` argument passed -to the `helm install` or `helm upgrade` command. - -### Values applicable to both charts - -| Key | Type | Description -|-------------------|--------|-----| -| Image | string | Override the docker image used by the operators -| ImagePullPolicy | string | Override the image pull policy used by the operators. See [Updating Images](https://kubernetes.io/docs/concepts/containers/images/#updating-images) for details. -| RBAC.Create | bool | Set to `true` (default) to create roles & role bindings. - -### Values applicable to the `kube-arangodb` chart - -| Key | Type | Description -|-------------------|--------|-----| -| Deployment.Create | bool | Set to `true` (default) to deploy the `ArangoDeployment` operator -| Deployment.User.ServiceAccountName | string | Name of the `ServiceAccount` that is the subject of the `RoleBinding` of users of the `ArangoDeployment` operator -| Deployment.Operator.ServiceAccountName | string | Name of the `ServiceAccount` used to run the `ArangoDeployment` operator -| Deployment.Operator.ServiceType | string | Type of `Service` created for the dashboard of the `ArangoDeployment` operator -| Deployment.AllowChaos | bool | Set to `true` to allow the introduction of chaos. **Only use for testing, never for production!** Defaults to `false`. -| DeploymentReplication.Create | bool | Set to `true` (default) to deploy the `ArangoDeploymentReplication` operator -| DeploymentReplication.User.ServiceAccountName | string | Name of the `ServiceAccount` that is the subject of the `RoleBinding` of users of the `ArangoDeploymentReplication` operator -| DeploymentReplication.Operator.ServiceAccountName | string | Name of the `ServiceAccount` used to run the `ArangoDeploymentReplication` operator -| DeploymentReplication.Operator.ServiceType | string | Type of `Service` created for the dashboard of the `ArangoDeploymentReplication` operator - -### Values applicable to the `kube-arangodb-storage` chart - -| Key | Type | Description -|-------------------|--------|-----| -| Storage.User.ServiceAccountName | string | Name of the `ServiceAccount` that is the subject of the `RoleBinding` of users of the `ArangoLocalStorage` operator -| Storage.Operator.ServiceAccountName | string | Name of the `ServiceAccount` used to run the `ArangoLocalStorage` operator -| Storage.Operator.ServiceType | string | Type of `Service` created for the dashboard of the `ArangoLocalStorage` operator - -## Alternate namespaces - -The `kube-arangodb` chart supports deployment into a non-default namespace. - -To install the `kube-arangodb` chart is a non-default namespace, use the `--namespace` -argument like this. - -```bash -helm install --namespace=mynamespace kube-arangodb.tgz -``` - -Note that since the operators claim exclusive access to a namespace, you can -install the `kube-arangodb` chart in a namespace once. -You can install the `kube-arangodb` chart in multiple namespaces. To do so, run: - -```bash -helm install --namespace=namespace1 kube-arangodb.tgz -helm install --namespace=namespace2 kube-arangodb.tgz -``` - -The `kube-arangodb-storage` chart is always installed in the `kube-system` namespace. 
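
Putting the values and the namespace option together, an installation that pins the image pull policy and skips the `ArangoDeploymentReplication` operator could, for example, be performed like this (chart file name as in the commands above):

```bash
helm install --namespace=mynamespace \
  --set=ImagePullPolicy=Always \
  --set=DeploymentReplication.Create=false \
  kube-arangodb.tgz
```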
- -## Common problems - -### Error: no available release name found - -This error is given by `helm install ...` in some cases where it has -insufficient permissions to install charts. - -For various ways to work around this problem go to [this Stackoverflow article](https://stackoverflow.com/questions/43499971/helm-error-no-available-release-name-found). diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Metrics.md b/Documentation/Books/Manual/Deployment/Kubernetes/Metrics.md deleted file mode 100644 index 7006f912d91d..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Metrics.md +++ /dev/null @@ -1,11 +0,0 @@ - -# Metrics - -The ArangoDB Kubernetes Operator (`kube-arangodb`) exposes metrics of -its operations in a format that is compatible with [Prometheus](https://prometheus.io). - -The metrics are exposed through HTTPS on port `8528` under path `/metrics`. - -Look at [examples/metrics](https://github.com/arangodb/kube-arangodb/tree/master/examples/metrics) -for examples of `Services` and `ServiceMonitors` you can use to integrate -with Prometheus through the [Prometheus-Operator by CoreOS](https://github.com/coreos/prometheus-operator). diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/README.md b/Documentation/Books/Manual/Deployment/Kubernetes/README.md deleted file mode 100644 index 8dc94b5d5050..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/README.md +++ /dev/null @@ -1,22 +0,0 @@ - -# ArangoDB Kubernetes Operator - -The ArangoDB Kubernetes Operator (`kube-arangodb`) is a set of operators -that you deploy in your Kubernetes cluster to: - -- Manage deployments of the ArangoDB database -- Provide `PersistentVolumes` on local storage of your nodes for optimal storage performance. -- Configure ArangoDB Datacenter to Datacenter replication - -Each of these uses involves a different custom resource. - -- Use an [`ArangoDeployment` resource](./DeploymentResource.md) to - create an ArangoDB database deployment. -- Use an [`ArangoLocalStorage` resource](./StorageResource.md) to - provide local `PersistentVolumes` for optimal I/O performance. -- Use an [`ArangoDeploymentReplication` resource](./DeploymentReplicationResource.md) to - configure ArangoDB Datacenter to Datacenter replication. - -Continue with [Using the ArangoDB Kubernetes Operator](./Usage.md) -to learn how to install the ArangoDB Kubernetes operator and create -your first deployment. \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Scaling.md b/Documentation/Books/Manual/Deployment/Kubernetes/Scaling.md deleted file mode 100644 index c435a6429020..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Scaling.md +++ /dev/null @@ -1,22 +0,0 @@ - -# Scaling - -The ArangoDB Kubernetes Operator supports up and down scaling of -the number of dbservers & coordinators. - -Currently it is not possible to change the number of -agents of a cluster. - -The scale up or down, change the number of servers in the custom -resource. - -E.g. change `spec.dbservers.count` from `3` to `4`. - -Then apply the updated resource using: - -```bash -kubectl apply -f yourCustomResourceFile.yaml -``` - -Inspect the status of the custom resource to monitor -the progress of the scaling operation. 
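
One way to monitor it is to fetch the full resource, including its status section (the same commands are shown in the Troubleshooting chapter; the resource name is a placeholder):

```bash
kubectl get ArangoDeployment my-arango -n default -o yaml
# or, to include recent events:
kubectl describe ArangoDeployment my-arango -n default
```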
diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/ServicesAndLoadBalancer.md b/Documentation/Books/Manual/Deployment/Kubernetes/ServicesAndLoadBalancer.md deleted file mode 100644 index 1a0439397b6c..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/ServicesAndLoadBalancer.md +++ /dev/null @@ -1,126 +0,0 @@ - -# Services and load balancer - -The ArangoDB Kubernetes Operator will create services that can be used to -reach the ArangoDB servers from inside the Kubernetes cluster. - -By default, the ArangoDB Kubernetes Operator will also create an additional -service to reach the ArangoDB deployment from outside the Kubernetes cluster. - -For exposing the ArangoDB deployment to the outside, there are 2 options: - -- Using a `NodePort` service. This will expose the deployment on a specific port (above 30.000) - on all nodes of the Kubernetes cluster. -- Using a `LoadBalancer` service. This will expose the deployment on a load-balancer - that is provisioned by the Kubernetes cluster. - -The `LoadBalancer` option is the most convenient, but not all Kubernetes clusters -are able to provision a load-balancer. Therefore we offer a third (and default) option: `Auto`. -In this option, the ArangoDB Kubernetes Operator tries to create a `LoadBalancer` -service. It then waits for up to a minute for the Kubernetes cluster to provision -a load-balancer for it. If that has not happened after a minute, the service -is replaced by a service of type `NodePort`. - -To inspect the created service, run: - -```bash -kubectl get services -ea -``` - -To use the ArangoDB servers from outside the Kubernetes cluster -you have to add another service as explained below. - -## Services - -If you do not want the ArangoDB Kubernetes Operator to create an external-access -service for you, set `spec.externalAccess.Type` to `None`. - -If you want to create external access services manually, follow the instructions below. - -### Single server - -For a single server deployment, the operator creates a single -`Service` named ``. This service has a normal cluster IP -address. - -### Full cluster - -For a full cluster deployment, the operator creates two `Services`. - -- `-int` a headless `Service` intended to provide - DNS names for all pods created by the operator. - It selects all ArangoDB & ArangoSync servers in the cluster. - -- `` a normal `Service` that selects only the coordinators - of the cluster. This `Service` is configured with `ClientIP` session - affinity. This is needed for cursor requests, since they are bound to - a specific coordinator. - -When the coordinators are asked to provide endpoints of the cluster -(e.g. when calling `client.SynchronizeEndpoints()` in the go driver) -the DNS names of the individual `Pods` will be returned -(`.-int..svc`) - -### Full cluster with DC2DC - -For a full cluster with datacenter replication deployment, -the same `Services` are created as for a Full cluster, with the following -additions: - -- `-sync` a normal `Service` that selects only the syncmasters - of the cluster. - -## Load balancer - -If you want full control of the `Services` needed to access the ArangoDB deployment -from outside your Kubernetes cluster, set `spec.externalAccess.type` of the `ArangoDeployment` to `None` -and create a `Service` as specified below. - -Create a `Service` of type `LoadBalancer` or `NodePort`, depending on your -Kubernetes deployment. 
- -This service should select: - -- `arango_deployment: ` -- `role: coordinator` - -The following example yields a service of type `LoadBalancer` with a specific -load balancer IP address. -With this service, the ArangoDB cluster can now be reached on `https://1.2.3.4:8529`. - -```yaml -kind: Service -apiVersion: v1 -metadata: - name: arangodb-cluster-exposed -spec: - selector: - arango_deployment: arangodb-cluster - role: coordinator - type: LoadBalancer - loadBalancerIP: 1.2.3.4 - ports: - - protocol: TCP - port: 8529 - targetPort: 8529 -``` - -The following example yields a service of type `NodePort` with the ArangoDB -cluster exposed on port 30529 of all nodes of the Kubernetes cluster. - -```yaml -kind: Service -apiVersion: v1 -metadata: - name: arangodb-cluster-exposed -spec: - selector: - arango_deployment: arangodb-cluster - role: coordinator - type: NodePort - ports: - - protocol: TCP - port: 8529 - targetPort: 8529 - nodePort: 30529 -``` diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/ShardsInSync.png b/Documentation/Books/Manual/Deployment/Kubernetes/ShardsInSync.png deleted file mode 100644 index f013ba30a6de..000000000000 Binary files a/Documentation/Books/Manual/Deployment/Kubernetes/ShardsInSync.png and /dev/null differ diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Storage.md b/Documentation/Books/Manual/Deployment/Kubernetes/Storage.md deleted file mode 100644 index 448ac7a6f09e..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Storage.md +++ /dev/null @@ -1,134 +0,0 @@ - -# Storage - -An ArangoDB cluster relies heavily on fast persistent storage. -The ArangoDB Kubernetes Operator uses `PersistentVolumeClaims` to deliver -the storage to Pods that need them. - -## Storage configuration - -In the `ArangoDeployment` resource, one can specify the type of storage -used by groups of servers using the `spec..storageClassName` -setting. - -This is an example of a `Cluster` deployment that stores its agent & dbserver -data on `PersistentVolumes` that use the `my-local-ssd` `StorageClass` - -```yaml -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "cluster-using-local-ssh" -spec: - mode: Cluster - agents: - storageClassName: my-local-ssd - dbservers: - storageClassName: my-local-ssd -``` - -The amount of storage needed is configured using the -`spec..resources.requests.storage` setting. - -Note that configuring storage is done per group of servers. -It is not possible to configure storage per individual -server. - -This is an example of a `Cluster` deployment that requests volumes of 80GB -for every dbserver, resulting in a total storage capacity of 240GB (with 3 dbservers). - -```yaml -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "cluster-using-local-ssh" -spec: - mode: Cluster - dbservers: - resources: - requests: - storage: 80Gi -``` - -## Local storage - -For optimal performance, ArangoDB should be configured with locally attached -SSD storage. - -The easiest way to accomplish this is to deploy an -[`ArangoLocalStorage` resource](./StorageResource.md). -The ArangoDB Storage Operator will use it to provide `PersistentVolumes` for you. - -This is an example of an `ArangoLocalStorage` resource that will result in -`PersistentVolumes` created on any node of the Kubernetes cluster -under the directory `/mnt/big-ssd-disk`. 
- -```yaml -apiVersion: "storage.arangodb.com/v1alpha" -kind: "ArangoLocalStorage" -metadata: - name: "example-arangodb-storage" -spec: - storageClass: - name: my-local-ssd - localPath: - - /mnt/big-ssd-disk -``` - -Note that using local storage required `VolumeScheduling` to be enabled in your -Kubernetes cluster. ON Kubernetes 1.10 this is enabled by default, on version -1.9 you have to enable it with a `--feature-gate` setting. - -### Manually creating `PersistentVolumes` - -The alternative is to create `PersistentVolumes` manually, for all servers that -need persistent storage (single, agents & dbservers). -E.g. for a `Cluster` with 3 agents and 5 dbservers, you must create 8 volumes. - -Note that each volume must have a capacity that is equal to or higher than the -capacity needed for each server. - -To select the correct node, add a required node-affinity annotation as shown -in the example below. - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: volume-agent-1 - annotations: - "volume.alpha.kubernetes.io/node-affinity": '{ - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { "matchExpressions": [ - { "key": "kubernetes.io/hostname", - "operator": "In", - "values": ["node-1"] - } - ]} - ]} - }' -spec: - capacity: - storage: 100Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Delete - storageClassName: local-ssd - local: - path: /mnt/disks/ssd1 -``` - -For Kubernetes 1.9 and up, you should create a `StorageClass` which is configured -to bind volumes on their first use as shown in the example below. -This ensures that the Kubernetes scheduler takes all constraints on a `Pod` -that into consideration before binding the volume to a claim. - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: local-ssd -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer -``` diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/StorageResource.md b/Documentation/Books/Manual/Deployment/Kubernetes/StorageResource.md deleted file mode 100644 index e680ac40a0f0..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/StorageResource.md +++ /dev/null @@ -1,63 +0,0 @@ - -# ArangoLocalStorage Custom Resource - -The ArangoDB Storage Operator creates and maintains ArangoDB -storage resources in a Kubernetes cluster, given a storage specification. -This storage specification is a `CustomResource` following -a `CustomResourceDefinition` created by the operator. - -Example minimal storage definition: - -```yaml -apiVersion: "storage.arangodb.com/v1alpha" -kind: "ArangoLocalStorage" -metadata: - name: "example-arangodb-storage" -spec: - storageClass: - name: my-local-ssd - localPath: - - /mnt/big-ssd-disk -``` - -This definition results in: - -- a `StorageClass` called `my-local-ssd` -- the dynamic provisioning of PersistentVolume's with - a local volume on a node where the local volume starts - in a sub-directory of `/mnt/big-ssd-disk`. -- the dynamic cleanup of PersistentVolume's (created by - the operator) after one is released. - -The provisioned volumes will have a capacity that matches -the requested capacity of volume claims. - -## Specification reference - -Below you'll find all settings of the `ArangoLocalStorage` custom resource. - -### `spec.storageClass.name: string` - -This setting specifies the name of the storage class that -created `PersistentVolume` will use. - -If empty, this field defaults to the name of the `ArangoLocalStorage` -object. 
- -If a `StorageClass` with given name does not yet exist, it -will be created. - -### `spec.storageClass.isDefault: bool` - -This setting specifies if the created `StorageClass` will -be marked as default storage class. (default is `false`) - -### `spec.localPath: stringList` - -This setting specifies one of more local directories -(on the nodes) used to create persistent volumes in. - -### `spec.nodeSelector: nodeSelector` - -This setting specifies which nodes the operator will -provision persistent volumes on. diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Tls.md b/Documentation/Books/Manual/Deployment/Kubernetes/Tls.md deleted file mode 100644 index 4591515c3949..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Tls.md +++ /dev/null @@ -1,55 +0,0 @@ - -# Secure connections (TLS) - -The ArangoDB Kubernetes Operator will by default create ArangoDB deployments -that use secure TLS connections. - -It uses a single CA certificate (stored in a Kubernetes secret) and -one certificate per ArangoDB server (stored in a Kubernetes secret per server). - -To disable TLS, set `spec.tls.caSecretName` to `None`. - -## Install CA certificate - -If the CA certificate is self-signed, it will not be trusted by browsers, -until you install it in the local operating system or browser. -This process differs per operating system. - -To do so, you first have to fetch the CA certificate from its Kubernetes -secret. - -```bash -kubectl get secret -ca --template='{{index .data "ca.crt"}}' | base64 -D > ca.crt -``` - -### Windows - -To install a CA certificate in Windows, follow the -[procedure described here](http://wiki.cacert.org/HowTo/InstallCAcertRoots). - -### macOS - -To install a CA certificate in macOS, run: - -```bash -sudo /usr/bin/security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ca.crt -``` - -To uninstall a CA certificate in macOS, run: - -```bash -sudo /usr/bin/security remove-trusted-cert -d ca.crt -``` - -### Linux - -To install a CA certificate in Linux, on Ubuntu, run: - -```bash -sudo cp ca.crt /usr/local/share/ca-certificates/.crt -sudo update-ca-certificates -``` - -## See also - -- [Authentication](./Authentication.md) diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Troubleshooting.md b/Documentation/Books/Manual/Deployment/Kubernetes/Troubleshooting.md deleted file mode 100644 index e384fad7f65d..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Troubleshooting.md +++ /dev/null @@ -1,116 +0,0 @@ - -# Troubleshooting - -While Kubernetes and the ArangoDB Kubernetes operator will automatically -resolve a lot of issues, there are always cases where human attention -is needed. - -This chapter gives your tips & tricks to help you troubleshoot deployments. - -## Where to look - -In Kubernetes all resources can be inspected using `kubectl` using either -the `get` or `describe` command. - -To get all details of the resource (both specification & status), -run the following command: - -```bash -kubectl get -n -o yaml -``` - -For example, to get the entire specification and status -of an `ArangoDeployment` resource named `my-arangodb` in the `default` namespace, -run: - -```bash -kubectl get ArangoDeployment my-arango -n default -o yaml -# or shorter -kubectl get arango my-arango -o yaml -``` - -Several types of resources (including all ArangoDB custom resources) support -events. These events show what happened to the resource over time. 
- -To show the events (and most important resource data) of a resource, -run the following command: - -```bash -kubectl describe -n -``` - -## Getting logs - -Another invaluable source of information is the log of containers being run -in Kubernetes. -These logs are accessible through the `Pods` that group these containers. - -To fetch the logs of the default container running in a `Pod`, run: - -```bash -kubectl logs -n -# or with follow option to keep inspecting logs while they are written -kubectl logs -n -f -``` - -To inspect the logs of a specific container in `Pod`, add `-c `. -You can find the names of the containers in the `Pod`, using `kubectl describe pod ...`. - -{% hint 'info' %} -Note that the ArangoDB operators are being deployed themselves as a Kubernetes `Deployment` -with 2 replicas. This means that you will have to fetch the logs of 2 `Pods` running -those replicas. -{% endhint %} - -## What if - -### The `Pods` of a deployment stay in `Pending` state - -There are two common causes for this. - -1) The `Pods` cannot be scheduled because there are not enough nodes available. - This is usually only the case with a `spec.environment` setting that has a value of `Production`. - - Solution: -Add more nodes. - -1) There are no `PersistentVolumes` available to be bound to the `PersistentVolumeClaims` - created by the operator. - - Solution: -Use `kubectl get persistentvolumes` to inspect the available `PersistentVolumes` -and if needed, use the [`ArangoLocalStorage` operator](./StorageResource.md) to provision `PersistentVolumes`. - -### When restarting a `Node`, the `Pods` scheduled on that node remain in `Terminating` state - -When a `Node` no longer makes regular calls to the Kubernetes API server, it is -marked as not available. Depending on specific settings in your `Pods`, Kubernetes -will at some point decide to terminate the `Pod`. As long as the `Node` is not -completely removed from the Kubernetes API server, Kubernetes will try to use -the `Node` itself to terminate the `Pod`. - -The `ArangoDeployment` operator recognizes this condition and will try to replace those -`Pods` with `Pods` on different nodes. The exact behavior differs per type of server. - -### What happens when a `Node` with local data is broken - -When a `Node` with `PersistentVolumes` hosted on that `Node` is broken and -cannot be repaired, the data in those `PersistentVolumes` is lost. - -If an `ArangoDeployment` of type `Single` was using one of those `PersistentVolumes` -the database is lost and must be restored from a backup. - -If an `ArangoDeployment` of type `ActiveFailover` or `Cluster` was using one of -those `PersistentVolumes`, it depends on the type of server that was using the volume. - -- If an `Agent` was using the volume, it can be repaired as long as 2 other agents are still healthy. -- If a `DBServer` was using the volume, and the replication factor of all database - collections is 2 or higher, and the remaining dbservers are still healthy, - the cluster will duplicate the remaining replicas to - bring the number of replicas back to the original number. -- If a `DBServer` was using the volume, and the replication factor of a database - collection is 1 and happens to be stored on that dbserver, the data is lost. -- If a single server of an `ActiveFailover` deployment was using the volume, and the - other single server is still healthy, the other single server will become leader. - After replacing the failed single server, the new follower will synchronize with - the leader. 
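
Whichever of these scenarios applies, the log of the `ArangoDeployment` operator shows which repair actions were taken. As described in the "Getting logs" section above, both operator replicas have to be checked, for example (deployment and namespace names as used elsewhere in this manual; the pod name is a placeholder):

```bash
# list the two operator pods
kubectl get pods -n default | grep arango-deployment-operator
# then fetch and follow the log of each of them
kubectl logs -n default <one-of-the-listed-pod-names> -f
```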
diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Upgrading.md b/Documentation/Books/Manual/Deployment/Kubernetes/Upgrading.md deleted file mode 100644 index 8f48a5e98e4b..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Upgrading.md +++ /dev/null @@ -1,42 +0,0 @@ - -# Upgrading - -The ArangoDB Kubernetes Operator supports upgrading an ArangoDB from -one version to the next. - -## Upgrade an ArangoDB deployment - -To upgrade a cluster, change the version by changing -the `spec.image` setting and the apply the updated -custom resource using: - -```bash -kubectl apply -f yourCustomResourceFile.yaml -``` - -The ArangoDB operator will perform an sequential upgrade -of all servers in your deployment. Only one server is upgraded -at a time. - -For patch level upgrades (e.g. 3.3.9 to 3.3.10) each server -is stopped and restarted with the new version. - -For minor level upgrades (e.g. 3.3.9 to 3.4.0) each server -is stopped, then the new version is started with `--database.auto-upgrade` -and once that is finish the new version is started with the normal arguments. - -The process for major level upgrades depends on the specific version. - -## Upgrade the operator itself - -To update the ArangoDB Kubernetes Operator itself to a new version, -update the image version of the deployment resource -and apply it using: - -```bash -kubectl apply -f examples/yourUpdatedDeployment.yaml -``` - -## See also - -- [Scaling](./Scaling.md) \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/Usage.md b/Documentation/Books/Manual/Deployment/Kubernetes/Usage.md deleted file mode 100644 index 86746904698e..000000000000 --- a/Documentation/Books/Manual/Deployment/Kubernetes/Usage.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Using the ArangoDB Kubernetes Operator - -## Installation - -The ArangoDB Kubernetes Operator needs to be installed in your Kubernetes -cluster first. - -If you have `Helm` available, we recommend installation using `Helm`. - -### Installation with Helm - -To install the ArangoDB Kubernetes Operator with [`helm`](https://www.helm.sh/), -run (replace `` with the version of the operator that you want to install): - -```bash -export URLPREFIX=https://github.com/arangodb/kube-arangodb/releases/download/ -helm install $URLPREFIX/kube-arangodb-crd.tgz -helm install $URLPREFIX/kube-arangodb.tgz -``` - -This installs operators for the `ArangoDeployment` and `ArangoDeploymentReplication` -resource types. - -If you want to avoid the installation of the operator for the `ArangoDeploymentReplication` -resource type, add `--set=DeploymentReplication.Create=false` to the `helm install` -command. - -To use `ArangoLocalStorage` resources, also run: - -```bash -helm install $URLPREFIX/kube-arangodb-storage.tgz -``` - -For more information on installing with `Helm` and how to customize an installation, -see [Using the ArangoDB Kubernetes Operator with Helm](./Helm.md). 
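
After the charts have been installed, a quick sanity check is to confirm that the CustomResourceDefinitions have been registered and the operator pods are running, for instance (exact names can vary slightly per chart version):

```bash
kubectl get crd | grep arangodb.com
kubectl get pods | grep arango
```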
- -### Installation with Kubectl - -To install the ArangoDB Kubernetes Operator without `Helm`, -run (replace `` with the version of the operator that you want to install): - -```bash -export URLPREFIX=https://raw.githubusercontent.com/arangodb/kube-arangodb//manifests -kubectl apply -f $URLPREFIX/arango-crd.yaml -kubectl apply -f $URLPREFIX/arango-deployment.yaml -``` - -To use `ArangoLocalStorage` resources, also run: - -```bash -kubectl apply -f $URLPREFIX/arango-storage.yaml -``` - -To use `ArangoDeploymentReplication` resources, also run: - -```bash -kubectl apply -f $URLPREFIX/arango-deployment-replication.yaml -``` - -You can find the latest release of the ArangoDB Kubernetes Operator -[in the kube-arangodb repository](https://github.com/arangodb/kube-arangodb/releases/latest). - -## ArangoDB deployment creation - -Once the operator is running, you can create your ArangoDB database deployment -by creating a `ArangoDeployment` custom resource and deploying it into your -Kubernetes cluster. - -For example (all examples can be found [in the kube-arangodb repository](https://github.com/arangodb/kube-arangodb/tree/master/examples)): - -```bash -kubectl apply -f examples/simple-cluster.yaml -``` - -## Deployment removal - -To remove an existing ArangoDB deployment, delete the custom -resource. The operator will then delete all created resources. - -For example: - -```bash -kubectl delete -f examples/simple-cluster.yaml -``` - -**Note that this will also delete all data in your ArangoDB deployment!** - -If you want to keep your data, make sure to create a backup before removing the deployment. - -## Operator removal - -To remove the entire ArangoDB Kubernetes Operator, remove all -clusters first and then remove the operator by running: - -```bash -helm delete -# If `ArangoLocalStorage` operator is installed -helm delete -``` - -or when you used `kubectl` to install the operator, run: - -```bash -kubectl delete deployment arango-deployment-operator -# If `ArangoLocalStorage` operator is installed -kubectl delete deployment -n kube-system arango-storage-operator -# If `ArangoDeploymentReplication` operator is installed -kubectl delete deployment arango-deployment-replication-operator -``` - -## See also - -- [Driver configuration](./DriverConfiguration.md) -- [Scaling](./Scaling.md) -- [Upgrading](./Upgrading.md) -- [Using the ArangoDB Kubernetes Operator with Helm](./Helm.md) \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/Manually/README.md b/Documentation/Books/Manual/Deployment/Manually/README.md deleted file mode 100644 index 1ac76215df83..000000000000 --- a/Documentation/Books/Manual/Deployment/Manually/README.md +++ /dev/null @@ -1,22 +0,0 @@ -Manual Deployment -================= - -**Single Instance:** - -- [Manually created processes](../SingleInstance/ManualStart.md) -- [Manually created Docker containers](../SingleInstance/ManualStart.md#manual-start-in-docker) - -**Master/Slave:** - -- [Manually created processes](../MasterSlave/ManualStart.md) - -**Active Failover:** - -- [Manually created processes](../ActiveFailover/ManualStart.md) -- [Manually created Docker containers](../ActiveFailover/ManualStart.md#manual-start-in-docker) - -**Cluster:** - -- [Manually created processes](../Cluster/ManualStart.md) -- [Manually created Docker containers](../Cluster/ManualStart.md#manual-start-in-docker) - diff --git a/Documentation/Books/Manual/Deployment/MasterSlave/ManualStart.md b/Documentation/Books/Manual/Deployment/MasterSlave/ManualStart.md deleted 
file mode 100644 index dc1733868ed7..000000000000 --- a/Documentation/Books/Manual/Deployment/MasterSlave/ManualStart.md +++ /dev/null @@ -1,30 +0,0 @@ -Manual Start -============ - -Setting up a working _Master/Slave_ replication requires at least two ArangoDB -instances: - -1. **master:** this is the instance where all data-modification operations should -be directed to. - -1. **slave:** this is the instance that replicates, in an asynchronous way, the data -from the _master_. For the replication to happen, a _replication applier_ has to -be started on the slave. The _replication applier_ will fetch data from the _master_'s -_write-ahead log_ and apply its operations locally. One or more slaves can replicate -from the same master. - -Generally, one deploys the _master_ on a machine and each _slave_ on an additional, -separate, machine (one per _slave_). In case the _master_ and the _slaves_ are -running on the same machine (tests only), please make sure you use different ports -(and data directories) for the _master_ and the _slaves_. - -Please install the _master_ and the _slaves_ as they were, separate, -[single instances](../SingleInstance/README.md). There are no specific differences, -at this stage, between a _master_ a _slave_ and a _single instance_. - -Once the ArangoDB _master_ and _slaves_ have been deployed, the replication has -to be started on each of the available _slaves_. This can be done at database level, -or globally. - -For further information on how to set up the replication in _master/slave_ environment, -please refer to [this](../../Administration/MasterSlave/SettingUp.md) _Section_. \ No newline at end of file diff --git a/Documentation/Books/Manual/Deployment/MasterSlave/README.md b/Documentation/Books/Manual/Deployment/MasterSlave/README.md deleted file mode 100644 index c2438e703552..000000000000 --- a/Documentation/Books/Manual/Deployment/MasterSlave/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Master/Slave Deployment -======================= - -This _Section_ describes how to deploy a _Master/Slave_ environment. - -For a general introduction to _Master/Slave_ in ArangoDB, please refer to the -[Master/Slave](../../Architecture/DeploymentModes/MasterSlave/README.md) chapter. diff --git a/Documentation/Books/Manual/Deployment/MigratingSingleInstanceCluster.md b/Documentation/Books/Manual/Deployment/MigratingSingleInstanceCluster.md deleted file mode 100644 index bd0d5c95ef35..000000000000 --- a/Documentation/Books/Manual/Deployment/MigratingSingleInstanceCluster.md +++ /dev/null @@ -1,23 +0,0 @@ -Migrating from a _Single Instance_ to a _Cluster_ -================================================== - -{% hint 'info' %} -Before migrating from a _Single Instance_ to a Cluster, -please read about -[**Single Instance vs. Cluster**](../Architecture/SingleInstanceVsCluster.md) -{% endhint %} - -To migrate from a _Single Instance_ to a _Cluster_ you will need -to take a backup from the _Single Instance_ and restore it into -the _Cluster_ with the tools [_arangodump_](../Programs/Arangodump/README.md) -and [_arangorestore_](../Programs/Arangorestore/README.md). - -{% hint 'warning' %} -If you have developed your application using a _Single Instance_ -and you would like to use a _Cluster_ now, before upgrading your production -system please test your application with the _Cluster_ first. - -If both your _Single Instance_ and _Cluster_ are running on the same -machine, they should have distinct data directories. 
It is not possible -to start a _Cluster_ on the data directory of a _Single Instance_. -{% endhint %} diff --git a/Documentation/Books/Manual/Deployment/Modes.md b/Documentation/Books/Manual/Deployment/Modes.md deleted file mode 100644 index d48fbfd80431..000000000000 --- a/Documentation/Books/Manual/Deployment/Modes.md +++ /dev/null @@ -1,14 +0,0 @@ -Deploying Options by ArangoDB _Deployment Mode_ -=============================================== - -- [Single instance](SingleInstance/README.md) -- [Master/Slave](MasterSlave/README.md) -- [Active Failover](ActiveFailover/README.md) -- [Cluster](Cluster/README.md) -- [Multiple Datacenters](DC2DC/README.md) -- [Standalone Agency](StandaloneAgency/README.md) - -Also see: - -- [Single Instance vs. Cluster](../Architecture/SingleInstanceVsCluster.md) -- [Migrating from Single Instance to Cluster](MigratingSingleInstanceCluster.md) diff --git a/Documentation/Books/Manual/Deployment/ProductionChecklist.md b/Documentation/Books/Manual/Deployment/ProductionChecklist.md deleted file mode 100644 index 05aae430aa86..000000000000 --- a/Documentation/Books/Manual/Deployment/ProductionChecklist.md +++ /dev/null @@ -1,36 +0,0 @@ -ArangoDB Production Checklist -============================= - -The following checklist can help to understand if important steps -have been performed on your production system before you go live: - -- Executed the OS optimization scripts (if you run ArangoDB on Linux). - See [Installing ArangoDB on Linux](../Installation/Linux.md) for details. -- OS monitoring is in place - (most common metrics, e.g. disk, CPU, RAM utilization). -- Disk space monitoring is in place - (only if you use the RocksDB storage engine). -- The user _root_ is not used to run any ArangoDB processes - (if you run ArangoDB on Linux). -- The _arangod_ (server) process and the _arangodb_ (_Starter_) process - (if in use) have some form of logging enabled and you can easily - locate and inspect them. -- If you use the _Starter_ to deploy, you stopped - and disabled - automated start of - the ArangoDB _Single Instance_, e.g. on Ubuntu: - - ``` - service arangodb3 stop - update-rc.d -f arangodb3 remove - ``` - - On Windows in a command prompt with elevated rights: - - ``` - sc config arangodb start= disabled - sc stop arangodb - ``` - -- If you have deployed a Cluster (and/or are using DC2DC) the - _replication factor_ of your collections is set to a value equal - or higher than 2 (so that you have **minimal - _data redundancy_ in place**). diff --git a/Documentation/Books/Manual/Deployment/README.md b/Documentation/Books/Manual/Deployment/README.md deleted file mode 100644 index 74dc9f5fb04b..000000000000 --- a/Documentation/Books/Manual/Deployment/README.md +++ /dev/null @@ -1,35 +0,0 @@ -Deployment -========== - -This _Chapter_ describes various possibilities to deploy ArangoDB. - -For installation instructions, please refer to the [Installation](../Installation/README.md) _Chapter_. - -For _production_ deployments, please also carefully check the -[ArangoDB Production Checklist](ProductionChecklist.md). - -Also check the description of -[Single Instance vs. Cluster](../Architecture/SingleInstanceVsCluster.md) and -[Migrating from Single Instance to Cluster](MigratingSingleInstanceCluster.md). 
- -By ArangoDB _Deployment Mode_: - -- [Single instance](SingleInstance/README.md) -- [Master/Slave](MasterSlave/README.md) -- [Active Failover](ActiveFailover/README.md) -- [Cluster](Cluster/README.md) -- [Multiple Datacenters](DC2DC/README.md) -- [Standalone Agency](StandaloneAgency/README.md) - -By _Technology_: - -- [Manually](Manually/README.md) -- [ArangoDB Starter](ArangoDBStarter/README.md) -- [Docker](Docker/README.md) -- [Kubernetes](Kubernetes/README.md) -- [Mesos, DC/OS](DCOS/README.md) - -In the _Cloud_: - -- [AWS](Cloud/AWS.md) -- [Azure](Cloud/Azure.md) diff --git a/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md b/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md deleted file mode 100644 index 128a78985bde..000000000000 --- a/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md +++ /dev/null @@ -1,72 +0,0 @@ -Starting Manually -================= - -This section describes how to start an ArangoDB stand-alone instance by manually -starting the needed process. - -Local Start ------------ - -We will assume that your IP is 127.0.0.1 and that the port 8529 is free: - -``` -arangod --server.endpoint tcp://0.0.0.0:8529 \ - --database.directory standalone & -``` - -Manual Start in Docker ----------------------- - -Manually starting a stand-alone instance via Docker is basically the same as described -in the paragraph above. - -A bit of extra care has to be invested due to the way in which Docker isolates its network. -By default it fully isolates the network and by doing so an endpoint like `--server.endpoint tcp://0.0.0.0:8529` -will only bind to all interfaces inside the Docker container which does not include -any external interface on the host machine. This may be sufficient if you just want -to access it locally but in case you want to expose it to the outside you must -facilitate Dockers port forwarding using the `-p` command line option. Be sure to -check the [official Docker documentation](https://docs.docker.com/engine/reference/run/). - -You can simply use the `-p` flag in Docker to make the individual processes available on the host -machine or you could use Docker's [links](https://docs.docker.com/engine/reference/run/) -to enable process intercommunication. - -An example configuration might look like this: - -``` -docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8529\ -``` - -This will start a single server within a Docker container with an isolated network. -Within the Docker container it will bind to all interfaces (this will be 127.0.0.1:8529 -and some internal Docker IP on port 8529). By supplying `-p 192.168.1.1:10000:8529` -we are establishing a port forwarding from our local IP (192.168.1.1 port 10000 in -this example) to port 8529 inside the container. - -### Authentication - -To start the official Docker container you will have to decide on an authentication -method, otherwise the container will not start. - -Provide one of the arguments to Docker as an environment variable. There are three -options: - -1. ARANGO_NO_AUTH=1 - - Disable authentication completely. Useful for local testing or for operating - in a trusted network (without a public interface). - -2. ARANGO_ROOT_PASSWORD=password - - Start ArangoDB with the given password for root. - -3. ARANGO_RANDOM_ROOT_PASSWORD=1 - - Let ArangoDB generate a random root password. 
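
For instance, a minimal start of the image using the second option could look like this; the password and the host port mapping are placeholders:

```bash
# starts arangod inside the container, reachable on host port 8529,
# with the given password set for the root user
docker run -e ARANGO_ROOT_PASSWORD=mySecretPassword -p 8529:8529 arangodb/arangodb
```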
- -For an in depth guide about Docker and ArangoDB please check the official documentation: -https://hub.docker.com/r/arangodb/arangodb/ . Note that we are using the image -`arangodb/arangodb` here which is always the most current one. There is also the -"official" one called `arangodb` whose documentation is here: https://hub.docker.com/_/arangodb/ diff --git a/Documentation/Books/Manual/Deployment/SingleInstance/README.md b/Documentation/Books/Manual/Deployment/SingleInstance/README.md deleted file mode 100644 index a53b46c7f39e..000000000000 --- a/Documentation/Books/Manual/Deployment/SingleInstance/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Single Instance Deployment -========================== - -Unlike other setups, like the _Active Failover_, _Cluster_, or _Multiple Datacenters_, -which require some specific procedure to be started once the ArangoDB package has -been installed, deploying a single-instance is straightforward. - -Depending on your operating system, after the installation the ArangoDB Server -might be already up and running. _Start_, _stop_ and _restart_ operations can be -handled directly using your _System and Service Manager_. - -The following are two additional ways that can be used to start the stand-alone -instance: - -1. Using the [_ArangoDB Starter_](UsingTheStarter.md), or -2. [manually](ManualStart.md). diff --git a/Documentation/Books/Manual/Deployment/SingleInstance/UsingTheStarter.md b/Documentation/Books/Manual/Deployment/SingleInstance/UsingTheStarter.md deleted file mode 100644 index 57d61bb08423..000000000000 --- a/Documentation/Books/Manual/Deployment/SingleInstance/UsingTheStarter.md +++ /dev/null @@ -1,91 +0,0 @@ - -Using the ArangoDB Starter -========================== - -This section describes how to start an ArangoDB stand-alone instance using the tool -[_Starter_](../../Programs/Starter/README.md) (the _arangodb_ binary program). - -Local Start ------------ - -If you want to start a stand-alone instance of ArangoDB, use the `--starter.mode=single` -option of the _Starter_: - -```bash -arangodb --starter.mode=single -``` - -Using the ArangoDB Starter in Docker ------------------------------------- - -The _Starter_ can also be used to launch a stand-alone instance based on _Docker_ -containers: - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=single -``` - -If you use an ArangoDB version of 3.4 or above and use the Enterprise -Edition Docker image, you have to set the license key in an environment -variable by adding this option to the above `docker` command: - -``` - -e ARANGO_LICENSE_KEY= -``` - -You can get a free evaluation license key by visiting - - https://www.arangodb.com/download-arangodb-enterprise/ - -Then replace `` above with the actual license key. The start -will then hand on the license key to the Docker container it launches -for ArangoDB. - -### TLS verified Docker services - -Oftentimes, one needs to harden Docker services using client certificate -and TLS verification. The Docker API allows subsequently only certified access. 
-As the ArangoDB starter starts the ArangoDB cluster instances using this Docker API, -it is mandatory that the ArangoDB starter is deployed with the proper certificates -handed to it, so that the above command is modified as follows: - -```bash -export IP= -export DOCKER_TLS_VERIFY=1 -export DOCKER_CERT_PATH=/path/to/certificate -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/certificate:/path/to/certificate - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=single -``` - -Note that the enviroment variables `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` -as well as the additional mountpoint containing the certificate have been added above. -directory. The assignment of `DOCKER_CERT_PATH` is optional, in which case it -is mandatory that the certificates are stored in `$HOME/.docker`. So -the command would then be as follows - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/cert:/root/.docker \ - -e DOCKER_TLS_VERIFY=1 \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=single -``` - diff --git a/Documentation/Books/Manual/Deployment/StandaloneAgency/README.md b/Documentation/Books/Manual/Deployment/StandaloneAgency/README.md deleted file mode 100644 index 036b41982d33..000000000000 --- a/Documentation/Books/Manual/Deployment/StandaloneAgency/README.md +++ /dev/null @@ -1,197 +0,0 @@ -Launching ArangoDB's standalone _Agency_ -======================================== - -Multiple ArangoDB instances can be deployed as a fault-tolerant distributed state machine. - -What is a fault-tolerant state machine in the first place? - -In many service deployments consisting of arbitrary components distributed over multiple machines one is faced with the challenge of creating a dependable centralized knowledge base or configuration. Implementation of such a service turns out to be one of the most fundamental problems in information engineering. While it may seem as if the realization of such a service is easily conceivable, dependability formulates a paradox on computer networks per se. On the one hand, one needs a distributed system to avoid a single point of failure. On the other hand, one has to establish consensus among the computers involved. - -Consensus is the keyword here and its realization on a network proves to be far from trivial. Many papers and conference proceedings have discussed and evaluated this key challenge. Two algorithms, historically far apart, have become widely popular, namely Paxos and its derivatives and Raft. Discussing them and their differences, although highly enjoyable, must remain far beyond the scope of this document. Find the references to the main publications at the bottom of this page. - -At ArangoDB, we decided to implement Raft as it is arguably the easier to understand and thus implement. In simple terms, Raft guarantees that a linear stream of transactions, is replicated in realtime among a group of machines through an elected leader, who in turn must have access to and project leadership upon an overall majority of participating instances. In ArangoDB we like to call the entirety of the components of the replicated transaction log, that is the machines and the ArangoDB instances, which constitute the replicated log, the agency. 
- -Startup -------- - -The agency must consists of an odd number of agents in order to be able to establish an overall majority and some means for the agents to be able to find one another at startup. - -The most obvious way would be to inform all agents of the addresses and ports of the rest. This however, is more information than needed. For example, it would suffice, if all agents would know the address and port of the next agent in a cyclic fashion. Another straightforward solution would be to inform all agents of the address and port of say the first agent. - -Clearly all cases, which would form disjunct subsets of agents would break or in the least impair the functionality of the agency. From there on the agents will gossip the missing information about their peers. - -Typically, one achieves fairly high fault-tolerance with low, odd number of agents while keeping the necessary network traffic at a minimum. It seems that the typical agency size will be 3, 5 or 7 agents. - -The below commands start up a 3-host agency on one physical/logical box with ports 8531, 8541 and 8551 for demonstration purposes. The address of the first instance, port 8531, is known to the other two. After at most 2 rounds of gossipping, the last 2 agents will have a complete picture of their surroundings and persist it for the next restart. - -``` -./arangod --agency.activate true --agency.size 3 --agency.my-address tcp://localhost:8531 --server.authentication false --server.endpoint tcp://0.0.0.0:8531 agency-8531 -./arangod --agency.activate true --agency.size 3 --agency.my-address tcp://localhost:8541 --server.authentication false --server.endpoint tcp://0.0.0.0:8541 --agency.endpoint tcp://localhost:8531 agency-8541 -./arangod --agency.activate true --agency.size 3 --agency.my-address tcp://localhost:8551 --server.authentication false --server.endpoint tcp://0.0.0.0:8551 --agency.endpoint tcp://localhost:8531 agency-8551 -``` - -The parameter `agency.endpoint` is the key ingredient for the second and third instances to find the first instance and thus form a complete agency. Please refer to the the shell-script `scripts/startStandaloneAgency.sh` on GitHub or in the source directory. - -Key-value-store API -------------------- - -The agency should be up and running within a couple of seconds, during which the instances have gossiped their way into knowing the other agents and elected a leader. 
The public API can be checked for the state of the configuration: - -``` -curl -s localhost:8531/_api/agency/config -``` - -```js -{ - "term": 1, - "leaderId": "AGNT-cec78b63-f098-4b4e-a157-a7bebf7947ba", - "commitIndex": 1, - "lastCompactionAt": 0, - "nextCompactionAfter": 1000, - "lastAcked": { - "AGNT-cec78b63-f098-4b4e-a157-a7bebf7947ba": { - "lastAckedTime": 0, - "lastAckedIndex": 1 - }, - "AGNT-5c8d92ed-3fb5-4886-8990-742ddb4482fa": { - "lastAckedTime": 0.167, - "lastAckedIndex": 1, - "lastAppend": 15.173 - }, - "AGNT-f6e79b6f-d55f-4ae5-a5e2-4c2d6272b0b8": { - "lastAckedTime": 0.167, - "lastAckedIndex": 1, - "lastAppend": 15.173 - } - }, - "configuration": { - "pool": { - "AGNT-f6e79b6f-d55f-4ae5-a5e2-4c2d6272b0b8": "tcp://localhost:8551", - "AGNT-cec78b63-f098-4b4e-a157-a7bebf7947ba": "tcp://localhost:8531", - "AGNT-5c8d92ed-3fb5-4886-8990-742ddb4482fa": "tcp://localhost:8541" - }, - "active": [ - "AGNT-f6e79b6f-d55f-4ae5-a5e2-4c2d6272b0b8", - "AGNT-5c8d92ed-3fb5-4886-8990-742ddb4482fa", - "AGNT-cec78b63-f098-4b4e-a157-a7bebf7947ba" - ], - "id": "AGNT-cec78b63-f098-4b4e-a157-a7bebf7947ba", - "agency size": 3, - "pool size": 3, - "endpoint": "tcp://localhost:8531", - "min ping": 1, - "max ping": 5, - "timeoutMult": 1, - "supervision": false, - "supervision frequency": 1, - "compaction step size": 1000, - "compaction keep size": 50000, - "supervision grace period": 10, - "version": 4, - "startup": "origin" - }, - "engine": "rocksdb", - "version": "3.4.3" -} -``` - -To highlight some details in the above output look for `"term"` and `"leaderId"`. Both are key information about the current state of the Raft algorithm. You may have noted that the first election term has established a random leader for the agency, who is in charge of replication of the state machine and for all external read and write requests until such time that the process gets isolated from the other two subsequently losing its leadership. - -Read and Write APIs -------------------- - -Generally, all read and write accesses are transactions moreover any read and write access may consist of multiple such transactions formulated as arrays of arrays in JSON documents. - -Read transaction ----------------- - -An agency started from scratch will deal with the simplest query as follows: -``` -curl -L localhost:8531/_api/agency/read -d '[["/"]]' -``` - -```js -[{}] -``` - -The above request for an empty key value store will return with an empty document. The inner array brackets will aggregate a result from multiple sources in the key-value-store while the outer array will deliver multiple such aggregated results. Also note the `-L` curl flag, which allows the request to follow redirects to the current leader. - -Consider the following key-value-store: -```js -{ - "baz": 12, - "corge": { - "e": 2.718281828459045, - "pi": 3.14159265359 - }, - "foo": { - "bar": "Hello World" - }, - "qux": { - "quux": "Hello World" - } -} -``` - -The following array of read transactions will yield: - -``` -curl -L localhost:8531/_api/agency/read -d '[["/foo", "/foo/bar", "/baz"],["/qux"]]' -``` - -```js -[ - { - "baz": 12, - "foo": { - "bar": "Hello World" - } - }, - { - "qux": { - "quux": "Hello World" - } - } -] -``` - -Note that the result is an array of two results for the first and second read transactions from above accordingly. Also note that the results from the first read transaction are aggregated into -```js -{ - "baz": 12, - "foo": { - "bar": "Hello World" - } -} -``` - -The aggregation is performed on 2 levels: - -1. 
`/foo/bar` is eliminated as a subset of `/foo` -2. The results from `/foo` and `/bar` are joined - -The word transaction means here that it is guaranteed that all aggregations happen in quasi-realtime and that no write access could have happened in between. - -Btw, the same transaction on the virgin key-value store would produce `[{},{}]` - -Write API ---------- - -The write API must unfortunately be a little more complex. Multiple roads lead to Rome: - -``` -curl -L localhost:8531/_api/agency/write -d '[[{"/foo":{"op":"push","new":"bar"}}]]' -curl -L localhost:8531/_api/agency/write -d '[[{"/foo":{"op":"push","new":"baz"}}]]' -curl -L localhost:8531/_api/agency/write -d '[[{"/foo":{"op":"push","new":"qux"}}]]' -``` - -and - -``` -curl -L localhost:8531/_api/agency/write -d '[[{"foo":["bar","baz","qux"]}]]' -``` - -are equivalent for example and will create and fill an array at `/foo`. Here, again, the outermost array is the container for the transaction arrays. - -A complete guide of the API can be found in the [API section](../../../HTTP/Agency/index.html). - diff --git a/Documentation/Books/Manual/Deployment/Technology.md b/Documentation/Books/Manual/Deployment/Technology.md deleted file mode 100644 index d9f61b30beee..000000000000 --- a/Documentation/Books/Manual/Deployment/Technology.md +++ /dev/null @@ -1,8 +0,0 @@ -ArangoDB Deploying Options by _Technology_ -========================================== - -- [Manually](Manually/README.md) -- [ArangoDB Starter](ArangoDBStarter/README.md) -- [Docker](Docker/README.md) -- [Kubernetes](Kubernetes/README.md) -- [Mesos, DC/OS](DCOS/README.md) diff --git a/Documentation/Books/Manual/Downgrading/README.md b/Documentation/Books/Manual/Downgrading/README.md deleted file mode 100644 index b51b7390ca16..000000000000 --- a/Documentation/Books/Manual/Downgrading/README.md +++ /dev/null @@ -1,81 +0,0 @@ -Downgrading -=========== - -A direct, in-place downgrade of ArangoDB is **not** supported. If you have upgraded -your ArangoDB package, and then also upgraded your current data directory, it is -not supported to downgrade the package and start an older ArangoDB version on a -data directory that was upgraded already. - -If you are using a standalone ArangoDB server, data directory could have been upgraded -automatically during package upgrade. If you are using the _Starter_ to start your -ArangoDB, and you have not triggered yet the rolling upgrade, or upgraded it -manually, your data directory is (probably) still on the old version, so you should -be able to binary downgrade in this case. - -Supported Downgrade Procedures ------------------------------- - -In order to downgrade, the following options are available: - -- Restore a backup you took using the tool [Arangodump](../Programs/Arangodump/README.md) - before the upgrade. -- Start the old package on the data directory backup you took before the upgrade. - -### Restore an _arangodump_ Backup - -This procedure assumes that you have taken an _arangodump_ backup using the old -ArangoDB version, before you upgraded it. - -1. Stop ArangoDB (if you are using an Active Failover, or a Cluster, stop all the needed - processes on all the machines). -2. As extra precaution, take a backup of your current data directory (at filesystem level). - If you are using an Active Failover or a Cluster, you will need to backup all the data - directories of all the processes involved, from all machines. Make sure you move your - data directory backup to a safe place. -3. 
Uninstall the ArangoDB package (use appropriate _purge_ option so your current data - directory is deleted, if you are using a stand alone instance). -4. Install the old version of ArangoDB. -5. Start ArangoDB. If you are using the _Starter_, please make sure you use a new data - directory for the _Starter_. -6. Restore the _arangodump_ backup taken before upgrading. - -### Start the old package on the data directory backup - -This procedure assumes that you have created a copy of your data directory (after having -stopped the ArangoDB process running on it) before the upgrade. If you are running an -Active Failover, or a Cluster, this procedure assumes that you have stopped them before -the upgrade, and that you have taken a copy of their data directories, from all involved -machines. - -This procedure cannot be used if you have done a rolling upgrade of your Active Failover -or Cluster setups (because in this case you do not have a copy of the data directories. - -1. Stop ArangoDB (if you are using an Active Failover, or a Cluster, stop all the needed - processes on all the machines). -2. As extra precaution, take a backup of your data directory (at filesystem level). If - you are using an Active Failover or a Cluster, you will need to backup all the data - directories of all the processes involved, from all machines. Make sure you move your - backup to a safe place. -3. Uninstall the ArangoDB package (use appropriate _purge_ option so your current data - directory is deleted, if you are using a stand alone instance). -4. Install the old version of ArangoDB. -5. Start ArangoDB on the data directory that you have backup-ed up (at filesystem level) - before the upgrade. As an extra precaution, please first take a new copy of this - directory and move it to a safe place. - -### Other possibilities - -If you have upgraded by mistake, and: - -- your data directory has been upgraded already -- it is not possible for you to follow any of the - [Supported Downgrade Procedures](#supported-downgrade-procedures) because: - - you do not have a _dump_ backup taken using the old ArangoDB version - - you do not have a copy of your data directory taken after stopping the old ArangoDB - process and before the upgrade - -...one possible option to downgrade could be to export the data from the new ArangoDB version -using the tool _arangoexport_ and reimport it using the tool _arangoimport_ in the old -version (after having installed and started it on a clean data directory). This method will -require some manual work to recreate the structure of your collections and your indices - but -it might still help you solving an otherwise challenging situation. diff --git a/Documentation/Books/Manual/FOOTER.html b/Documentation/Books/Manual/FOOTER.html deleted file mode 100644 index 239869bfaf6a..000000000000 --- a/Documentation/Books/Manual/FOOTER.html +++ /dev/null @@ -1 +0,0 @@ -© ArangoDB - the native multi-model NoSQL database \ No newline at end of file diff --git a/Documentation/Books/Manual/Foxx/Deployment.md b/Documentation/Books/Manual/Foxx/Deployment.md deleted file mode 100644 index d5056d41b5b5..000000000000 --- a/Documentation/Books/Manual/Foxx/Deployment.md +++ /dev/null @@ -1,15 +0,0 @@ -Deployment -========== - -Foxx services can be deployed in multiple ways: - -- [Foxx CLI](../Programs/FoxxCLI/README.md), a command line tool which - requires Node.js - -- [HTTP API](../../HTTP/Foxx/index.html) using HTTP requests, - e.g. 
with curl - -- [web interface](../Programs/WebInterface/Services.md) under *SERVICES* - -See [the Foxx cluster guide](Guides/Cluster.md#how-arangodb-distributes-services) -for how Foxx services are distributed to coordinators. diff --git a/Documentation/Books/Manual/Foxx/GettingStarted.md b/Documentation/Books/Manual/Foxx/GettingStarted.md deleted file mode 100644 index 211d2df6ff48..000000000000 --- a/Documentation/Books/Manual/Foxx/GettingStarted.md +++ /dev/null @@ -1,476 +0,0 @@ -Getting Started -=============== - -This practical introduction will take you from an empty folder to a first -Foxx service querying data. - -Manifest --------- - -We're going to start with an empty folder. This will be the root folder of our -services. You can name it something clever but for the course of this guide -we'll assume it's called the name of your service: `getting-started`. - -First we need to create a manifest. Create a new file called `manifest.json` -and add the following content: - -```json -{ - "engines": { - "arangodb": "^3.0.0" - } -} -``` - -This just tells ArangoDB the service is compatible with versions 3.0.0 and -later (all the way up to but not including 4.0.0), allowing older versions -of ArangoDB to understand that this service likely won't work for them and -newer versions what behavior to emulate should they still support it. - -The little hat to the left of the version number is not a typo, it's called a -"caret" and indicates the version range. Foxx uses semantic versioning (also -called "semver") for most of its version handling. You can find out more about -how semver works at the [official semver website](http://semver.org). - -Next we'll need to specify an entry point to our service. This is the -JavaScript file that will be executed to define our service's HTTP endpoints. -We can do this by adding a "main" field to our manifest: - -```json -{ - "engines": { - "arangodb": "^3.0.0" - }, - "main": "index.js" -} -``` - -That's all we need in our manifest for now. - -Router ------- - -Let's next create the `index.js` file: - -```js -'use strict'; -const createRouter = require('@arangodb/foxx/router'); -const router = createRouter(); - -module.context.use(router); -``` - -The first line causes our file to be interpreted using -[strict mode](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Strict_mode). -All examples in the ArangoDB documentation assume strict mode, so you might -want to familiarize yourself with it if you haven't encountered it before. - -The second line imports the `@arangodb/foxx/router` module which provides a -function for creating new Foxx routers. We're using this function to create a -new `router` object which we'll be using for our service. - -The `module.context` is the so-called Foxx context or service context. -This variable is available in all files that are part of your Foxx service and -provides access to Foxx APIs specific to the current service, like the `use` -method, which tells Foxx to mount the `router` in this service (and to expose -its routes to HTTP). - -Next let's define a route that prints a generic greeting: - -```js -// continued -router.get('/hello-world', function (req, res) { - res.send('Hello World!'); -}) -.response(['text/plain'], 'A generic greeting.') -.summary('Generic greeting') -.description('Prints a generic greeting.'); -``` - -The `router` provides the methods `get`, `post`, etc corresponding to each -HTTP verb as well as the catch-all `all`. 
These methods indicate that the given -route should be used to handle incoming requests with the given HTTP verb -(or any method when using `all`). - -These methods take an optional path (if omitted, it defaults to `"/"`) as well -as a request handler, which is a function taking the `req` -([request](Reference/Routers/Request.md)) and `res` -([response](Reference/Routers/Response.md)) objects to handle the incoming -request and generate the outgoing response. If you have used the express -framework in Node.js, you may already be familiar with how this works, -otherwise check out the chapter on [routes](Reference/Routers/Endpoints.md). - -The object returned by the router's methods provides additional methods to -attach metadata and validation to the route. We're using `summary` and -`description` to document what the route does -- these aren't strictly -necessary but give us some nice auto-generated documentation. -The `response` method lets us additionally document the response content -type and what the response body will represent. - -Try it out ----------- - -At this point you can upload the service folder as a zip archive from the -web interface using the *Services* tab. - - - -Click *Add Service* then pick the *Zip* option in the dialog. You will need -to provide a *mount path*, which is the URL prefix at which the service will -be mounted (e.g. `/getting-started`). - - - -Once you have picked the zip archive using the file picker, the upload should -begin immediately and your service should be installed. Otherwise press the -*Install* button and wait for the dialog to disappear and the service to show -up in the service list. - - - -Click anywhere on the card with your mount path on the label to open the -service's details. - - - -In the API documentation you should see the route we defined earlier -(`/hello-world`) with the word `GET` next to it indicating the HTTP method it -supports and the `summary` we provided on the right. By clicking on the -route's path you can open the documentation for the route. - - - -Note that the `description` we provided appears in the generated documentation -as well as the description we added to the `response` (which should correctly -indicate the content type `text/plain`, i.e. plain text). - -Click the *Try it out!* button to send a request to the route and you should -see an example request with the service's response: "Hello World!". - - - -Congratulations! You have just created, installed and used your first Foxx service. - -Parameter validation --------------------- - -Let's add another route that provides a more personalized greeting: - -```js -// continued -const joi = require('joi'); - -router.get('/hello/:name', function (req, res) { - res.send(`Hello ${req.pathParams.name}`); -}) -.pathParam('name', joi.string().required(), 'Name to greet.') -.response(['text/plain'], 'A personalized greeting.') -.summary('Personalized greeting') -.description('Prints a personalized greeting.'); -``` - -The first line imports the [`joi` module from npm](https://www.npmjs.com/package/joi) -which comes bundled with ArangoDB. Joi is a validation library that is used -throughout Foxx to define schemas and parameter types. - -**Note**: You can bundle your own modules from npm by installing them in your -service folder and making sure the `node_modules` folder is included in your -zip archive. For more information see the chapter on -[bundling node modules](Guides/BundledNodeModules.md). 
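
A minimal sketch of that bundling workflow from the command line (the `dedent` package is only an arbitrary example, not something this service needs):

```bash
# run inside the service folder
npm install dedent

# include node_modules/ when building the zip archive you upload
zip -r getting-started.zip manifest.json index.js node_modules
```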
- -The `pathParam` method allows us to specify parameters we are expecting in -the path. The first argument corresponds to the parameter name in the path, -the second argument is a joi schema the parameter is expected to match and -the final argument serves to describe the parameter in the API documentation. - -The path parameters are accessible from the `pathParams` property of the -request object. We're using a template string to generate the server's -response containing the parameter's value. - -Note that routes with path parameters that fail to validate for the request URL -will be skipped as if they wouldn't exist. This allows you to define multiple -routes that are only distinguished by the schemas of their path parameters (e.g. -a route taking only numeric parameters and one taking any string as a fallback). - - - -Let's take this further and create a route that takes a JSON request body: - -```js -// continued -router.post('/sum', function (req, res) { - const values = req.body.values; - res.send({ - result: values.reduce(function (a, b) { - return a + b; - }, 0) - }); -}) -.body(joi.object({ - values: joi.array().items(joi.number().required()).required() -}).required(), 'Values to add together.') -.response(joi.object({ - result: joi.number().required() -}).required(), 'Sum of the input values.') -.summary('Add up numbers') -.description('Calculates the sum of an array of number values.'); -``` - -Note that we used `post` to define this route instead of `get` (which does not -support request bodies). Trying to send a GET request to this route's URL -(in the absence of a `get` route for the same path) will result in Foxx -responding with an appropriate error response, indicating the supported -HTTP methods. - -As this route not only expects a JSON object as input but also responds with -a JSON object as output we need to define two schemas. We don't strictly need -a response schema but it helps documenting what the route should be expected -to respond with and will show up in the API documentation. - -Because we're passing a schema to the `response` method we don't need to -explicitly tell Foxx we are sending a JSON response. The presence of a schema -in the absence of a content type always implies we want JSON. Though we could -just add `["application/json"]` as an additional argument after the schema if -we wanted to make this more explicit. - -The `body` method works the same way as the `response` method except the schema -will be used to validate the request body. If the request body can't be parsed -as JSON or doesn't match the schema, Foxx will reject the request with an -appropriate error response. - - - -Creating collections --------------------- - -The real power of Foxx comes from interacting with the database itself. -In order to be able to use a collection from within our service, we should -first make sure that the collection actually exists. The right place to create -collections your service is going to use is in -[a *setup* script](Guides/Scripts.md), which Foxx will execute for you when -installing or updating the service. - -First create a new folder called "scripts" in the service folder, which will -be where our scripts are going to live. 
For simplicity's sake, our setup -script will live in a file called `setup.js` inside that folder: - -```js -// continued -'use strict'; -const db = require('@arangodb').db; -const collectionName = 'myFoxxCollection'; - -if (!db._collection(collectionName)) { - db._createDocumentCollection(collectionName); -} -``` - -The script uses the [`db` object](../Appendix/References/DBObject.md) from -the `@arangodb` module, which lets us interact with the database the Foxx -service was installed in and the collections inside that database. Because the -script may be executed multiple times (i.e. whenever we update the service or -when the server is restarted) we need to make sure we don't accidentally try -to create the same collection twice (which would result in an exception); -we do that by first checking whether it already exists before creating it. - -The `_collection` method looks up a collection by name and returns `null` if no -collection with that name was found. The `_createDocumentCollection` method -creates a new document collection by name (`_createEdgeCollection` also exists -and works analogously for edge collections). - -**Note**: Because we have hardcoded the collection name, multiple copies of -the service installed alongside each other in the same database will share -the same collection. -Because this may not always be what you want, the [Foxx context](Reference/Context.md) -also provides the `collectionName` method which applies a mount point specific -prefix to any given collection name to make it unique to the service. It also -provides the `collection` method, which behaves almost exactly like `db._collection` -except it also applies the prefix before looking the collection up. - -Next we need to tell our service about the script by adding it to the manifest file: - -```json -{ - "engines": { - "arangodb": "^3.0.0" - }, - "main": "index.js", - "scripts": { - "setup": "scripts/setup.js" - } -} -``` - -The only thing that has changed is that we added a "scripts" field specifying -the path of the setup script we just wrote. - -Go back to the web interface and update the service with our new code, then -check the *Collections* tab. If everything worked right, you should see a new -collection called "myFoxxCollection". 
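
If you prefer the command line over the web interface, the same check can be done from arangosh; this assumes the service was installed in the database arangosh is currently connected to:

```js
// in arangosh
db._collection("myFoxxCollection");
// returns the collection if the setup script ran, or null otherwise
```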
- - - -Accessing collections ---------------------- - -Let's expand our service by adding a few more routes to our `index.js`: - -```js -// continued -const db = require('@arangodb').db; -const errors = require('@arangodb').errors; -const foxxColl = db._collection('myFoxxCollection'); -const DOC_NOT_FOUND = errors.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code; - -router.post('/entries', function (req, res) { - const data = req.body; - const meta = foxxColl.save(req.body); - res.send(Object.assign(data, meta)); -}) -.body(joi.object().required(), 'Entry to store in the collection.') -.response(joi.object().required(), 'Entry stored in the collection.') -.summary('Store an entry') -.description('Stores an entry in the "myFoxxCollection" collection.'); - -router.get('/entries/:key', function (req, res) { - try { - const data = foxxColl.document(req.pathParams.key); - res.send(data) - } catch (e) { - if (!e.isArangoError || e.errorNum !== DOC_NOT_FOUND) { - throw e; - } - res.throw(404, 'The entry does not exist', e); - } -}) -.pathParam('key', joi.string().required(), 'Key of the entry.') -.response(joi.object().required(), 'Entry stored in the collection.') -.summary('Retrieve an entry') -.description('Retrieves an entry from the "myFoxxCollection" collection by key.'); -``` - -We're using the `save` and `document` methods of the collection object to store -and retrieve documents in the collection we created in our setup script. -Because we don't care what the documents look like we allow any attributes on -the request body and just accept an object. - -Because the key will be automatically generated by ArangoDB when one wasn't -specified in the request body, we're using `Object.assign` to apply the -attributes of the metadata object returned by the `save` method to the document -before returning it from our first route. - -The `document` method returns a document in a collection by its `_key` or `_id`. -However when no matching document exists it throws an `ArangoError` exception. -Because we want to provide a more descriptive error message than ArangoDB does -out of the box, we need to handle that error explicitly. - -All `ArangoError` exceptions have a truthy attribute `isArangoError` that helps -you recognizing these errors without having to worry about `instanceof` checks. -They also provide an `errorNum` and an `errorMessage`. If you want to check for -specific errors you can just import the `errors` object from the `@arangodb` -module instead of having to memorize numeric error codes. - -Instead of defining our own response logic for the error case we just use -`res.throw`, which makes the response object throw an exception Foxx can -recognize and convert to the appropriate server response. We also pass along -the exception itself so Foxx can provide more diagnostic information when we -want it to. - -We could extend the post route to support arrays of objects as well, each -object following a certain schema: - -```js -// store schema in variable to make it re-usable, see .body() -const docSchema = joi.object().required().keys({ - name: joi.string().required(), - age: joi.number().required() -}).unknown(); // allow additional attributes - -router.post('/entries', function (req, res) { - const multiple = Array.isArray(req.body); - const body = multiple ? req.body : [req.body]; - - let data = []; - for (var doc of body) { - const meta = foxxColl.save(doc); - data.push(Object.assign(doc, meta)); - } - res.send(multiple ? 
data : data[0]); - -}) -.body(joi.alternatives().try( - docSchema, - joi.array().items(docSchema) -), 'Entry or entries to store in the collection.') -.response(joi.alternatives().try( - joi.object().required(), - joi.array().items(joi.object().required()) -), 'Entry or entries stored in the collection.') -.summary('Store entry or entries') -.description('Store a single entry or multiple entries in the "myFoxxCollection" collection.'); -``` - -Writing database queries ------------------------- - -Storing and retrieving entries is fine, but right now we have to memorize each -key when we create an entry. Let's add a route that gives us a list of the -keys of all entries so we can use those to look an entry up in detail. - -The naïve approach would be to use the `toArray()` method to convert the entire -collection to an array and just return that. But we're only interested in the -keys and there might potentially be so many entries that first retrieving every -single document might get unwieldy. Let's write a short AQL query to do this instead: - -```js -// continued -const aql = require('@arangodb').aql; - -router.get('/entries', function (req, res) { - const keys = db._query(aql` - FOR entry IN ${foxxColl} - RETURN entry._key - `); - res.send(keys); -}) -.response(joi.array().items( - joi.string().required() -).required(), 'List of entry keys.') -.summary('List entry keys') -.description('Assembles a list of keys of entries in the collection.'); -``` - -Here we're using two new things: - -The `_query` method executes an AQL query in the active database. - -The `aql` template string handler allows us to write multi-line AQL queries and -also handles query parameters and collection names. Instead of hardcoding the -name of the collection we want to use in the query we can simply reference the -`foxxColl` variable we defined earlier – it recognizes the value as an ArangoDB -collection object and knows we are specifying a collection rather than a regular -value even though AQL distinguishes between the two. - -**Note**: If you aren't used to JavaScript template strings and template string -handlers just think of `aql` as a function that receives the multiline string -split at every `${}` expression as well as an array of the values of those -expressions – that's actually all there is to it. - -Alternatively, here's a version without template strings (notice how much -cleaner the `aql` version will be in comparison when you have multiple variables): - -```js -const keys = db._query( - 'FOR entry IN @@coll RETURN entry._key', - {'@coll': foxxColl.name()} -); -``` - -Next steps ----------- - -You now know how to create a Foxx service from scratch, how to handle user -input and how to access the database from within your Foxx service to store, -retrieve and query data you store inside ArangoDB. This should allow you to -build meaningful APIs for your own applications but there are many more things -you can do with Foxx. See the [Guides](Guides/README.md) chapter for more. diff --git a/Documentation/Books/Manual/Foxx/Guides/Auth.md b/Documentation/Books/Manual/Foxx/Guides/Auth.md deleted file mode 100644 index 98053f35c375..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Auth.md +++ /dev/null @@ -1,242 +0,0 @@ -Authentication -============== - -Foxx provides the [auth module](../Reference/Modules/Auth.md) to implement -basic password verification and hashing but is not very secure unless using -the (very slow) PBKDF2 algorithm. 
Alternatively you can use the -[OAuth 1.0a](../Reference/Modules/OAuth1.md) or -[OAuth 2.0](../Reference/Modules/OAuth2.md) modules to offload identity -management to a trusted provider (e.g. Facebook, GitHub, Google or Twitter). - -The [session middleware](../Reference/Sessions/README.md) provides a mechanism -for adding session logic to your service, using e.g. a collection or -JSON Web Tokens to store the sessions between requests. - -With these building blocks you can implement your own session-based -authentication. - -Implementing session authentication ------------------------------------ - -In this example we'll use two collections: a `users` collection to store the -user objects with names and credentials, and a `sessions` collection to store -the session data. We'll also make sure usernames are unique -by adding a hash index: - -```js -"use strict"; -const { db } = require("@arangodb"); -const users = module.context.collectionName("users"); -if (!db._collection(users)) { - db._createDocumentCollection(users); -} -const sessions = module.context.collectionName("sessions"); -if (!db._collection(sessions)) { - db._createDocumentCollection(sessions); -} -module.context.collection("users").ensureIndex({ - type: "hash", - unique: true, - fields: ["username"] -}); -``` - -Next you should create a sessions middleware that uses the `sessions` -collection and the "cookie" transport in a separate file, and add it -to the service router: - -```js -// in util/sessions.js -"use strict"; -const sessionsMiddleware = require("@arangodb/foxx/sessions"); -const sessions = sessionsMiddleware({ - storage: module.context.collection("sessions"), - transport: "cookie" -}); -module.exports = sessions; -``` - -```js -// in your main file -// ... -const sessions = require("./util/sessions"); -module.context.use(sessions); -``` - -You'll want to be able to use the authenticator throughout multiple parts -of your service so it's best to create it in a separate module and export it -so we can import it anywhere we need it: - -```js -"use strict"; -const createAuth = require("@arangodb/foxx/auth"); -const auth = createAuth(); -module.exports = auth; -``` - -If you want, you can now use the authenticator to help create an initial user -in the setup script. Note we're hardcoding the password here but you could -make it configurable via a -[service configuration option](../Reference/Configuration.md): - -```js -// ... -const auth = require("./util/auth"); -const users = module.context.collection("users"); -if (!users.firstExample({ username: "admin" })) { - users.save({ - username: "admin", - password: auth.create("hunter2") - }); -} -``` - -We can now put the two together to create a login route: - -```js -// ... -const auth = require("./util/auth"); -const users = module.context.collection("users"); -const joi = require("joi"); -const createRouter = require("@arangodb/foxx/router"); -const router = createRouter(); - -router - .post("/login", function(req, res) { - const user = users.firstExample({ - username: req.body.username - }); - const valid = auth.verify( - // Pretend to validate even if no user was found - user ? 
user.authData : {}, - req.body.password - ); - if (!valid) res.throw("unauthorized"); - // Log the user in using the key - // because usernames might change - req.session.uid = user._key; - req.sessionStorage.save(req.session); - res.send({ username: user.username }); - }) - .body( - joi - .object({ - username: joi.string().required(), - password: joi.string().required() - }) - .required() - ); -``` - -To provide information about the authenticated user we can look up -the session user: - -```js -router.get("/me", function(req, res) { - try { - const user = users.document(req.session.uid); - res.send({ username: user.username }); - } catch (e) { - res.throw("not found"); - } -}); -``` - -To log a user out we can remove the user from the session: - -```js -router.post("/logout", function(req, res) { - if (req.session.uid) { - req.session.uid = null; - req.sessionStorage.save(req.session); - } - res.status("no content"); -}); -``` - -Finally when using the collection-based session storage, it's a good idea to -clean up expired sessions in a script which we can periodically call via an -external tool like `cron` or a [Foxx queue](../Reference/Modules/Queues.md): - -```js -"use strict"; -const sessions = require("./util/sessions"); -module.exports = sessions.storage.prune(); -``` - -Using ArangoDB authentication ------------------------------ - -When using HTTP Basic authentication, ArangoDB will set the `arangoUser` -attribute of the [request object](../Reference/Routers/Request.md) if the -credentials match a valid ArangoDB user for the database. - -**Note**: Although the presence and value of this attribute can be used to -implement a low-level authentication mechanism this is only useful if your -service is only intended to be used by developers who already have access to -the HTTP API or the administrative web interface. - -Example: - -```js -router.get("/me", function(req, res) { - if (req.arangoUser) { - res.json({ username: req.arangoUser }); - } else { - res.throw("not found"); - } -}); -``` - -Alternative sessions implementation ------------------------------------ - -If you need more control than the sessions middleware provides, -you can also create a basic session system with a few lines of code yourself: - -```js -"use strict"; -const sessions = module.context.collection("sessions"); -// This is the secret string used to sign cookies -// you probably don't want to hardcode this. 
-const secret = "some secret string"; - -module.context.use((req, res, next) => { - // First read the session cookie if present - let sid = req.cookie("sid", { secret }); - if (sid) { - try { - // Try to find a matching session - req.session = sessions.document(sid); - } catch (e) { - // No session found, cookie is invalid - sid = null; - // Clear the cookie so it will be discarded - res.cookie("sid", "", { ttl: -1, secret }); - } - } - try { - // Continue handling the request - next(); - } finally { - // Do this even if the request threw - if (req.session) { - if (sid) { - // Sync the session's changes to the db - sessions.update(sid, req.session); - } else { - // Create a new session with a new key - sid = sessions.save(req.session)._key; - } - // Set or update the session cookie - res.cookie("sid", sid, { ttl: 24 * 60 * 60, secret }); - } else if (sid) { - // The request handler explicitly cleared - // the session, so we need to delete it - sessions.remove(sid); - // And clear the cookie too - res.cookie("sid", "", { ttl: -1, secret }); - } - } -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/Browser.md b/Documentation/Books/Manual/Foxx/Guides/Browser.md deleted file mode 100644 index bfe3059ea683..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Browser.md +++ /dev/null @@ -1,151 +0,0 @@ -Exposing Foxx to the browser -============================ - -There are three ways to use Foxx in a web application: - -1. Accessing Foxx from an application server that exposes its own API. - -2. Using a web server like Apache or nginx as a reverse proxy to expose - only the Foxx service. - -3. Exposing ArangoDB directly by running ArangoDB on a public port. - -Using an application server ---------------------------- - -Accessing Foxx from an application server is probably the safest approach as -the application server shields the database from the browser entirely. However -this also adds the most development overhead and may result in unnecessary -duplication of access logic. - -This approach works best if you're using Foxx in an existing application stack -or want to use an [ArangoDB driver](https://www.arangodb.com/arangodb-drivers/) -to access the database API directly alongside your Foxx service. - -As Foxx services provide ordinary HTTP endpoints, you can access them from your -existing application server using any run-of-the-mill HTTP client with JSON -support. Some ArangoDB drivers also let you access arbitrary HTTP endpoints. - -Example (Node with arangojs): - -```js -"use strict"; -const express = require("express"); -const app = express(); -const { Database } = require("arangojs"); -const db = new Database(); -db.useDatabase("mydb"); -const service = db.route("/my-foxx"); -app.get("/", async function(req, res) { - // Passes the response from '/_db/mydb/my-foxx/hello' - const response = await service.get("/hello"); - res.status(response.statusCode); - res.write(response.body); - res.end(); -}); -app.listen(9000); -``` - -Using a reverse proxy ---------------------- - -For information on setting up the Apache web server as a reverse proxy check -[the official Apache 2.4 documentation](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html). -For nginx check -[the nginx admin guide](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/). 
-Similar documentation exists for -[lighttpd](https://redmine.lighttpd.net/projects/1/wiki/Docs_ModProxy) and -[Microsoft IIS](https://blogs.msdn.microsoft.com/friis/2016/08/25/setup-iis-with-url-rewrite-as-a-reverse-proxy-for-real-world-apps/). - -Example (nginx): - -```nginx -location /api/ { - proxy_pass http://127.0.0.1:8529/_db/_system/my-foxx/; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Host $host:$server_port; - proxy_set_header X-Forwarded-Proto $scheme; -} -``` - -Example (Apache): - -```apacheconf - - RequestHeader set X-Forwarded-Proto "https" # or "http" - # Other X-Forwarded-* headers are set automatically - ProxyPass http://127.0.0.1:8529/_db/_system/my-foxx - -``` - -The advantage of this approach is that it allows you to expose just the service -itself without exposing the entire database API. - -This approach also works well if you're already using a web server to serve -your web application frontend files and want your frontend to talk directly to -the service. - -**Note**: when running Foxx behind a reverse proxy some properties of the -request object will reflect the proxy rather than the original request source -(i.e. the browser). You can tell Foxx to expect to run behind a trusted proxy -by enabling the `trustProxy` property of the service context: - -```js -// in your main entry file, e.g. index.js -module.context.trustProxy = true; -``` - -Foxx will then trust the values of the following request headers: - -- `x-forwarded-proto` for `req.protocol` -- `x-forwarded-host` for `req.hostname` and `req.port` -- `x-forwarded-port` for `req.port` -- `x-forwarded-for` for `req.remoteAddress` and `req.remoteAddresses` - -Note that this property needs to be set in your main entry file. Setting it in -the setup script has no effect. - -Exposing ArangoDB directly --------------------------- - -This is the most obvious but also most dangerous way to expose your Foxx -service. Running ArangoDB on a public port will expose the entire database API -and allow anyone who can guess your database credentials to do whatever -they want. - -Unless your service is explicitly intended to be used by people who already -have access to the ArangoDB web interface, you should go with one of the other -approaches instead. - -{% hint 'danger' %} -Only use this for internal services intended to help -users who already have full access to the database. -**Don't ever expose your database to the public Internet.** -{% endhint %} - -### Cross-Origin Resource Sharing (CORS) - -If you are running ArangoDB on a public port and -want a web app running on a different port or domain to access it, -you will need to enable CORS in ArangoDB. - -First you need to -[configure ArangoDB for CORS](../../../HTTP/General/index.html#cross-origin-resource-sharing-cors-requests). -As of 3.2 Foxx will then automatically whitelist all response headers as they are used. - -If you want more control over the whitelist or are using an older version of -ArangoDB you can set the following response headers in your request handler: - -- `access-control-expose-headers`: a comma-separated list of response headers. - This defaults to a list of all headers the response is actually using - (but not including any `access-control` headers). - -- `access-control-allow-credentials`: can be set to `"false"` to forbid - exposing cookies. The default value depends on whether ArangoDB - trusts the origin. 
See the - [notes on `http.trusted-origin`](../../../HTTP/General/index.html#cookies-and-authentication). - -Note that it is not possible to override these headers for the CORS preflight -response. It is therefore not possible to accept credentials or cookies only -for individual routes, services or databases. The origin needs to be trusted -according to the general ArangoDB configuration (see above). diff --git a/Documentation/Books/Manual/Foxx/Guides/BundledNodeModules.md b/Documentation/Books/Manual/Foxx/Guides/BundledNodeModules.md deleted file mode 100644 index fb4e5697ae04..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/BundledNodeModules.md +++ /dev/null @@ -1,49 +0,0 @@ -Bundled Node modules -==================== - -You can use the `node_modules` folder to bundle Node.js modules with your Foxx -service. Note that many third-party libraries written for Node.js or the -browser rely on async or filesystem logic -[which may not be compatible with Foxx](../README.md#compatibility-caveats). - -{% hint 'info' %} -Bundled node modules are often referred to as _dependencies_. In ArangoDB this -term can often be ambiguous because Foxx also provides a -[dependency mechanism](Dependencies.md) for linking services together. -{% endhint %} - -Use a tool like [yarn](https://yarnpkg.com) or -[npm](https://npmjs.com) to -create a `package.json` file in your service source directory and add node -dependencies as you would for any other Node.js application or library: - -```sh -cd my-foxx-service/ -echo '{"private": true}' > package.json -yarn add lodash # or: -npm install --save lodash -``` - -Make sure to include the actual `node_modules` folder in your Foxx service -bundle as ArangoDB will not automatically install these dependencies for you. -Also keep in mind that bundling extraneous modules like development -dependencies may bloat the file size of your Foxx service bundle. - -If you are using the [Foxx CLI](../../Programs/FoxxCLI/README.md) -command-line tool, you can exclude individual modules by ignoring them: - -```sh -npm install --save prettier -foxx ignore '/node_modules/prettier/' -# the 'prettier' folder will now be excluded -# in service bundles generated by foxx-cli -foxx install /my-foxx-service -``` - -Keep in mind that both yarn and npm typically also install dependencies of -your dependencies to the `node_modules` folder which you'll need to ignore as -well if you want to exclude these modules from your service bundle. - -If you are using the npm package manager, you can use -`npm install --global-style` to force these indirect dependencies -to be nested to make them easier to exclude. \ No newline at end of file diff --git a/Documentation/Books/Manual/Foxx/Guides/Cluster.md b/Documentation/Books/Manual/Foxx/Guides/Cluster.md deleted file mode 100644 index bf5a48e811d7..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Cluster.md +++ /dev/null @@ -1,67 +0,0 @@ -Foxx in a cluster setup -======================= - -When running ArangoDB in a cluster the Foxx services will run on each -coordinator. Installing, upgrading and uninstalling services on any coordinator -will automatically distribute the changes to the service to the other coordinators, -making deployments as easy as in single-server mode. - -The same considerations that apply to writing Foxx services for a -standalone server also apply to writing services for a cluster: - -You should avoid any kind of file system state beyond the deployed service -bundle itself. 
Don't [write data to the file system](Files.md) or encode -any expectations of the file system state other than the files in the -service folder that were installed as part of the service -(e.g. don't use file uploads or custom log files). - -Additionally, special precautions need to be taken when using the -[development mode in a cluster](DevelopmentMode.md#in-a-cluster). - -How ArangoDB distributes services ---------------------------------- - -When you install, replace, upgrade or remove a service, these actions first -take place on a single coordinator and are then distributed to the other -coordinators. If a coordinator for some reason fails to be informed, -its periodic self-healing process will pick up the changes eventually -and apply them anyway. - -1. When installing, upgrading or replacing a service, the new service is - extracted to a temporary directory where Foxx validates the manifest file - and parses the referenced scripts and main file. - -2. When replacing, upgrading or removing a service, the old service's teardown - script is executed in a single thread of the coordinator as desired. - -3. When replacing, upgrading or installing a service, the new service's setup - script is executed in a single thread of the coordinator as desired. - -4. The validated service bundle is copied to the coordinator's service bundles - directory, extracted to the coordinator's service directory and committed - to an internal collection along with a signature. - -5. The service metadata stored in another internal collection is updated, - replaced or created with the new service bundle's signature. An upgrade - retains existing metadata like configuration and dependencies whereas - a replace completely discards any existing metadata. - -6. The existing service is unloaded from the coordinator's worker threads - and the new service is reloaded. If the new service runs into an error - at this point, the service will be marked as broken and - needs to be replaced manually. - -7. The coordinator triggers a local self-heal followed by triggering - a self-heal on all other coordinators. - -8. During the self-heal the coordinator compares the signature of the - local bundle of each service against the signature stored in that - service's metadata in the database. If necessary, the corresponding - new bundle is downloaded from the database and extracted and the service - is reloaded as in step 6 before. - -Note that this means that any service that passes the initial validation step -will complete the install, upgrade or replace process, even if any of the -consecutive steps fail (e.g. due to a runtime error encountered while executing -the service's main file or a syntax error in a required file not referenced -from the manifest directly). diff --git a/Documentation/Books/Manual/Foxx/Guides/Collections.md b/Documentation/Books/Manual/Foxx/Guides/Collections.md deleted file mode 100644 index 504edeec799b..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Collections.md +++ /dev/null @@ -1,70 +0,0 @@ -Working with collections -======================== - -Foxx provides the [`module.context.collection`](../Reference/Context.md) method -to provide easy access to ArangoDB collections. These collections are also -called "prefixed collections" because Foxx will automatically prefix the name -based on the mount path of the service. 
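For illustration, here is a minimal sketch. The `users` collection and the resulting prefixed name are made up; the actual prefix is derived from the service's mount path:

```js
"use strict";
// Assuming a "users" collection was created in the service's setup script,
// this returns a collection object whose real name carries the service prefix.
const users = module.context.collection("users");
console.log(users.name()); // e.g. "myfoxx_users"
users.save({ username: "admin" });
```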
- -The prefixes may initially feel unnecessarily verbose but help avoid conflicts -between different services with similar collection names or even multiple -copies of the same service sharing the same database. Keep in mind that you -can also use collection objects when [writing queries](Queries.md), -so you don't need to worry about writing out prefixes by hand. - -As a rule of thumb you should always use `module.context.collection` -to access collections in your service. - -Low-level collection access ---------------------------- - -ArangoDB provides a -[low-level API for managing collections](../../DataModeling/Collections/DatabaseMethods.md) -via [the `db` object](../../Appendix/References/DBObject.md). -These APIs are not very useful for most application logic but allow you to -create and destroy collections in your -[lifecycle scripts and migrations](Scripts.md#lifecycle-scripts). - -Using these methods requires you to work with fully qualified collection names. -This means instead of using `module.context.collection` to get a -_collection object_ you need to use `module.context.collectionName` -to get the prefixed _collection name_ ArangoDB understands: - -```js -"use strict"; -const { db } = require("@arangodb"); -const collectionName = module.context.collectionName("users"); - -if (!db._collection(collectionName)) { - db._createDocumentCollection(collectionName); -} -``` - -Sharing collections -------------------- - -The most obvious way to share collections between multiple services is to use -an unprefixed collection name and then use the low-level `db._collection` -method to access that collection from each service that needs access to it. - -The downside of this approach is that it results in an implicit dependency of -those services on a single collection as well as creating the potential for -subtle problems if a different service uses the same unprefixed -collection name in the future. - -The cleanest approach is to instead decide on a single service which manages -the collection and set up [explicit dependencies](Dependencies.md) between -the different services using the collection: - -```js -// in the service's main file: -exports.users = module.context.collection("users"); - -// in a dependent service's code: -const users = module.dependencies.usersService.users; -``` - -This approach not only makes the dependency on an externally managed collection -explicit but also allows having those services still use different collections -if necessary by providing multiple copies of the service that provides the -shared collection. diff --git a/Documentation/Books/Manual/Foxx/Guides/Dependencies.md b/Documentation/Books/Manual/Foxx/Guides/Dependencies.md deleted file mode 100644 index a21f0fdf4b67..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Dependencies.md +++ /dev/null @@ -1,224 +0,0 @@ -Linking services together -========================= - -When using multiple services (or multiple copies of the same service) in the -same database, sometimes you may want to share collections or methods between -those services. Typical examples are: - -- [collections](Collections.md) or APIs for managing shared data - (e.g. 
application users or session data) -- common [middleware](../Reference/Routers/Middleware.md) that requires some - [configuration](../Reference/Configuration.md) that would be identical - for multiple services -- [reusable routers](Routing.md) that provide the same API - for different services - -For scenarios like these, Foxx provides a way to link services together and -allow them to export JS APIs other services can use. -In Foxx these JS APIs are called _dependencies_, -the services implementing them are called _providers_, -the services using them are called _consumers_. - -{% hint 'info' %} -This chapter is about Foxx dependencies as described above. In JavaScript the -term _dependencies_ can also refer to -[bundled node modules](BundledNodeModules.md), which are an unrelated concept. -{% endhint %} - - -Declaring dependencies ----------------------- - -Foxx dependencies can be declared in the -[service manifest](../Reference/Manifest.md) -using the `provides` and `dependencies` fields: - -- `provides` lists the dependencies a given service provides, - i.e. which APIs it claims to be compatible with - -- `dependencies` lists the dependencies a given service consumes, - i.e. which APIs its dependencies need to be compatible with - -Explicitly naming your dependencies helps improving tooling support for -managing service dependencies in ArangoDB but is not strictly necessary. -It is possible to omit the `provides` field even if your service provides a -JS API and the `dependencies` field can be used without explicitly specifying -dependency names. - -A dependency name should be an alphanumeric identifier, optionally using a -namespace prefix (i.e. `dependency-name` or `@namespace/dependency-name`). -For example, services maintained by the ArangoDB Foxx team typically use -the `@foxx` namespace whereas the `@arangodb` namespace -is reserved for internal use. - -There is no official registry for dependency names but we recommend ensuring -the dependency names you use are unambiguous and meaningful -to other developers using your services. - -A `provides` definition maps each provided dependency's name -to the provided version: - -```json -"provides": { - "@example/auth": "1.0.0" -} -``` - -A `dependencies` definition maps the _local alias_ of each consumed dependency -against a short definition that includes the name and version range: - -```json -"dependencies": { - "myAuth": { - "name": "@example/auth", - "version": "^1.0.0", - "description": "This description is entirely optional.", - "required": false, - "multiple": false - } -} -``` - -The local alias should be a valid JavaScript identifier -(e.g. a valid variable name). When a dependency has been assigned, -its JS API will be exposed in a corresponding property of the -[service context](../Reference/Context.md), -e.g. `module.context.dependencies.myAuth`. - -Assigning dependencies ----------------------- - -Like [configuration](../Reference/Configuration.md), -dependencies can be assigned using -the [web interface](../../Programs/WebInterface/Services.md), -the [Foxx CLI](../../Programs/FoxxCLI/README.md) or -the [Foxx HTTP API](../../../HTTP/Foxx/Configuration.html). - -The value for each dependency should be the database-relative mount path of -the service (including the leading slash). Both services need to be mounted in -the same database. The same service can be used to provide a dependency -for multiple services. 
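Once a dependency has been assigned, the provider's exports are available on the consumer's service context. A hedged sketch, reusing the `myAuth` alias from the manifest example above and assuming that an unassigned optional dependency simply shows up as `undefined`:

```js
"use strict";
// "myAuth" was declared with required: false, so it may not be assigned yet.
const usersApi = module.context.dependencies.myAuth;
if (usersApi) {
  // whatever the provider exported from its main file is available here
  console.log(Object.keys(usersApi));
}
```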
- -Also as with configuration, a service that declares required dependencies which -have not been assigned will not be mounted by Foxx until all required -dependencies have been assigned. Instead any attempt to access the service's -HTTP API will result in an error code. - -Exporting a JS API ------------------- - -In order to provide a JS API other services can consume as a dependency, -the service's _main_ file needs to export something other services can use. -You can do this by assigning a value to the `module.exports` or properties -of the `exports` object as with any other module export: - -```js -module.exports = "Hello world"; -``` - -This also includes collections. In the following example, the collection -exported by the provider will use the provider's -[collection prefix](Collections.md) rather than the consumer's, -allowing both services to share the same collection: - -```js -module.exports = module.context.collection("shared_documents"); -``` - -Let's imagine we have a service managing our application's users. -Rather than allowing any consuming service to access the collection directly, -we can provide a number of methods to manipulate it: - -```js -const auth = require("./util/auth"); -const users = module.context.collection("users"); - -exports.login = (username, password) => { - const user = users.firstExample({ username }); - if (!user) throw new Error("Wrong username"); - const valid = auth.verify(user.authData, password); - if (!valid) throw new Error("Wrong password"); - return user; -}; -exports.setPassword = (user, password) => { - const authData = auth.create(password); - users.update(user, { authData }); - return user; -}; -``` - -Or you could even export a factory function to create an API that uses a -custom error type provided by the consumer rather than the producer: - -```js -const auth = require("./util/auth"); -const users = module.context.collection("users"); - -module.exports = (BadCredentialsError = Error) => { - return { - login(username, password) { - const user = users.firstExample({ username }); - if (!user) throw new BadCredentialsError("Wrong username"); - const valid = auth.verify(user.authData, password); - if (!valid) throw new BadCredentialsError("Wrong password"); - return user; - }, - setPassword(user, password) { - const authData = auth.create(password); - users.update(user, { authData }); - return user; - } - }; -}; -``` - -Example usage (the consumer uses the local alias `usersApi`): - -```js -"use strict"; -const createRouter = require("@arangodb/foxx/router"); -const joi = require("joi"); - -// Using the dependency with arguments -const AuthFailureError = require("./errors/auth-failure"); -const createUsersApi = module.context.dependencies.usersApi; -const users = createUsersApi(AuthFailureError); - -const router = createRouter(); -module.context.use(router); - -router.use((req, res, next) => { - try { - next(); - } catch (e) { - if (e instanceof AuthFailureError) { - res.status(401); - res.json({ - error: true, - message: e.message - }); - } else { - console.error(e.stack); - res.status(500); - res.json({ - error: true, - message: "Something went wrong." 
- }); - } - } -}); - -router - .post("/login", (req, res) => { - const { username, password } = req.body; - const user = users.login(username, password); - // handle login success - res.json({ welcome: username }); - }) - .body( - joi.object().keys({ - username: joi.string().required(), - password: joi.string().required() - }) - ); -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/DevelopmentMode.md b/Documentation/Books/Manual/Foxx/Guides/DevelopmentMode.md deleted file mode 100644 index 560ded98768a..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/DevelopmentMode.md +++ /dev/null @@ -1,84 +0,0 @@ -Development mode -================ - -Development mode allows developers to make changes to deployed services -in-place directly on the database server's file system without downloading -and re-uploading the service bundle. This can help during rapid development -of service prototypes or diagnosing complex problems. - -You can toggle development mode on and off using -the [Foxx CLI](../../Programs/FoxxCLI/README.md), -the [HTTP API](../../../HTTP/Foxx/Miscellaneous.html) or -in the service settings tab of the web interface. - -{% hint 'info' %} -To find out where a service's active source files are stored, check the -service settings in the web interface or the service details when using -the Foxx CLI or HTTP API. The root folder for all services can also be -set explicitly by overriding the `javascript.app-path` option in the -ArangoDB configuration. -{% endhint %} - -In development mode the service's source files and manifest will be -re-evaluated, and its setup script (if present) re-executed, -every time a route of the service is accessed, -effectively re-deploying the service on every request. -Additionally, error responses generated by Foxx will include stacktraces and -when viewed in a browser may include relevant sections of the service code -that generated the error. - -{% hint 'warning' %} -To avoid losing your changes, we recommend using a tool like -[lsyncd](https://github.com/axkibe/lsyncd) to synchronize changes from your -local working copy to the service files continuously rather than modifying -those files directly. -Alternatively you can easily re-deploy your local copy of the service using -the [Foxx CLI](../../Programs/FoxxCLI/README.md). -{% endhint %} - -There are a number of caveats you should be aware of -when using development mode: - -- the additional information provided in error responses can leak - critical information like source code and file system paths - -- parallel requests may result in race conditions as the setup script - may be executed in multiple threads in parallel - (outside development mode the setup would only be executed in one thread) - -- the setup script will likely be executed numerous times, although - [using additional migration scripts](Scripts.md#migrations) - may help avoiding some of the added overhead - -- if you are [serving static files](Files.md#serving-files), - keep in mind that requests to these files will still result in - a re-deployment of the service - -- making HTTP requests to the service via `@arangodb/request` - (e.g. [as part of an integration test](Testing.md)) - also results in re-deployment, which can result in inconsistent behavior - -- the service files should be treated as highly volatile as they will - be erased if the service is uninstalled/replaced or the database removed - -For these reasons we strongly advise against using development mode -on production servers. 
- -In a cluster ------------- - -{% hint 'danger' %} -Using development mode in a production cluster -is extremely unsafe and highly discouraged. -{% endhint %} - -Development mode in a cluster applies to each coordinator individually. -Changes to the service's file system on a single coordinator will be reflected -as usual but only on that single coordinator. -When development mode is disabled on one coordinator, -it will create a new service bundle from the local changes and -distribute it across the cluster to the other coordinators. - -This can result in problems when service code is modified -on multiple coordinators. Development mode should therefore only be used -for diagnostic purposes and avoided if possible. diff --git a/Documentation/Books/Manual/Foxx/Guides/Files.md b/Documentation/Books/Manual/Foxx/Guides/Files.md deleted file mode 100644 index 3582e51e7c11..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Files.md +++ /dev/null @@ -1,110 +0,0 @@ -File access in Foxx -=================== - -Files within the service folder should always be considered read-only. -You should not expect to be able to write to your service folder or -modify any existing files. - -ArangoDB is primarily a database. In most cases the best place to store data -is therefore inside the database, not on the file system. - -Serving files -------------- - -The most flexible way to serve files in your Foxx service is to simply -pass them through in your router using -the [context object's `fileName` method](../Reference/Context.md#filename) and -the [response object's `sendFile` method](../Reference/Routers/Response.md#sendfile): - -```js -router.get("/some/filename.png", function(req, res) { - const filePath = module.context.fileName("some-local-filename.png"); - res.sendFile(filePath); -}); -``` - -While allowing for greater control of how the file should be sent to -the client and who should be able to access it, -doing this for all your static assets can get tedious. - -Alternatively you can specify file assets that should be served by your -Foxx service directly in the [service manifest](../Reference/Manifest.md) -using the `files` attribute: - -```json -"files": { - "/some/filename.png": { - "path": "some-local-filename.png", - "type": "image/png", - "gzip": false - }, - "/favicon.ico": "bookmark.ico", - "/static": "my-assets-folder" -} -``` - -Writing files -------------- - -It is almost always an extremely bad idea to attempt to modify -the filesystem from within a service: - -- The service folder itself is considered an implementation artefact and - **may be discarded and replaced without warning**. - ArangoDB maintains a canonical copy of each service internally to - detect missing or damaged services and restore them automatically. - -- ArangoDB uses multiple V8 contexts to allow handling multiple - Foxx requests in parallel. Writing to the same file in a request handler - may therefore cause race conditions and **result in corrupted data**. - -- Writing to files outside the service folder introduces external state. In - a cluster this will result in coordinators no longer being interchangeable. - -- Writing to files during setup is unreliable because the setup script may - be executed several times or not at all. In a cluster the setup script - will only be executed on a single coordinator. - -Therefore it is almost always a better option to store files using a -specialized, external file storage service -and handle file uploads outside Foxx itself. 
- -However in some cases it may be feasible to store smaller files directly in -ArangoDB documents by using a separate collection. - -{% hint 'danger' %} -Due to the way ArangoDB stores documents internally, you should not store -file contents alongside other attributes that might be updated independently. -Additionally, large file sizes will impact performance for operations -involving the document and may affect overall database performance. -{% endhint %} - -{% hint 'warning' %} -In production, you should avoid storing any files in ArangoDB or handling file -uploads in Foxx. The following example will work for moderate amounts of small -files but is not recommended for large files or frequent uploads or -modifications. -{% endhint %} - -To store files in a document you can simply convert the file contents -as a `Buffer` to a base64-encoded string: - -```js -router.post('/avatars/:filename', (req, res) => { - collection.save({ - filename: req.pathParams.filename, - data: req.body.toString('base64') - }); - res.status('no content'); -}); -router.get('/avatars/:filename', (req, res) => { - const doc = collection.firstExample({ - filename: req.pathParams.filename - }); - if (!doc) res.throw('not found'); - const data = new Buffer(doc.data, 'base64'); - res.set('content-type', 'image/png'); - res.set('content-length', data.length); - res.write(data); -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/LegacyMode.md b/Documentation/Books/Manual/Foxx/Guides/LegacyMode.md deleted file mode 100644 index a97c0d492a4a..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/LegacyMode.md +++ /dev/null @@ -1,96 +0,0 @@ -Working with 2.x services -========================= - -ArangoDB 3 continues to support Foxx services written for ArangoDB 2.8 by -running them in a special legacy compatibility mode that provides access to -some of the modules and APIs no longer provided in 3.0 and beyond. - -{% hint 'warning' %} -Legacy compatibility mode is strictly intended as a temporary stop -gap solution for supporting existing services while -[upgrading to ArangoDB 3.x](../Migrating2x/README.md) -and is not a permanent feature of ArangoDB or Foxx. It is considered -as deprecated from v3.4.0 on. -{% endhint %} - -In order to mark an existing service as a legacy service, -make sure the following attribute is defined in the service manifest: - -```json -"engines": { - "arangodb": "^2.8.0" -} -``` - -This [semantic version range](http://semver.org) denotes that the service -is known to work with ArangoDB 2.8.0 and supports all newer versions of -ArangoDB up to but not including 3.0.0 and later. - -Any similar version range the does not include 3.0.0 or greater will have -the same effect (e.g. `^2.5.0` will also trigger the legacy compatibility mode, -as will `1.2.3`, but `>=2.8.0` will not as it indicates compatibility -with *all* versions greater or equal 2.8.0, -not just those within the 2.x version range). - -Features supported in legacy compatibility mode ------------------------------------------------ - -Legacy compatibility mode supports the old manifest format, specifically: - -* `main` is ignored -* `controllers` will be mounted as in 2.8 -* `exports` will be executed as in 2.8 - -Additionally the `isSystem` attribute will be ignored if present but -does not result in a warning in legacy compatibility mode. - -The Foxx console is available as the `console` pseudo-global variable -(shadowing the global console object). 
- -The service context is available as the `applicationContext` pseudo-global -variable in the `controllers`, `exports`, `scripts` and `tests` as in 2.8. -The following additional properties are available on the service context -in legacy compatibility mode: - -* `path()` is an alias for 3.x `fileName()` (using `path.join` to build file paths) -* `fileName()` behaves as in 2.x (using `fs.safeJoin` to build file paths) -* `foxxFileName()` is an alias for 2.x `fileName` -* `version` exposes the service manifest's `version` attribute -* `name` exposes the service manifest's `name` attribute -* `options` exposes the service's raw options - -The following methods are removed on the service context in legacy compatibility mode: - -* `use()` – use `@arangodb/foxx/controller` instead -* `apiDocumentation()` – use `controller.apiDocumentation()` instead -* `registerType()` – not supported in legacy compatibility mode - -The following modules that have been removed or replaced in 3.0.0 are -available in legacy compatibility mode: - -* `@arangodb/foxx/authentication` -* `@arangodb/foxx/console` -* `@arangodb/foxx/controller` -* `@arangodb/foxx/model` -* `@arangodb/foxx/query` -* `@arangodb/foxx/repository` -* `@arangodb/foxx/schema` -* `@arangodb/foxx/sessions` -* `@arangodb/foxx/template_middleware` - -The `@arangodb/foxx` module also provides the same exports as in 2.8, namely: - -* `Controller` from `@arangodb/foxx/controller` -* `createQuery` from `@arangodb/foxx/query` -* `Model` from `@arangodb/foxx/model` -* `Repository` from `@arangodb/foxx/repository` -* `toJSONSchema` from `@arangodb/foxx/schema` -* `getExports` and `requireApp` from `@arangodb/foxx/manager` -* `queues` from `@arangodb/foxx/queues` - -Any feature not supported in 2.8 will also not work in legacy compatibility mode. -When migrating from an older version of ArangoDB it is a good idea to -migrate to ArangoDB 2.8 first for an easier upgrade path. - -Additionally, please note the differences laid out in the chapter on -[migrating from pre-2.8](../Migrating2x/Wayback.md) in the migration guide. diff --git a/Documentation/Books/Manual/Foxx/Guides/MakingRequests.md b/Documentation/Books/Manual/Foxx/Guides/MakingRequests.md deleted file mode 100644 index 1679a674e6ef..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/MakingRequests.md +++ /dev/null @@ -1,41 +0,0 @@ -Making requests -=============== - -ArangoDB is primarily a database, so Foxx doesn't offer the same level of -network access as more general-purpose JavaScript environments like Node.js. -However ArangoDB does provide the -[`@arangodb/request` module](../../Appendix/JavaScriptModules/Request.md) -for making HTTP (or HTTPS) requests: - -```js -"use strict"; -const request = require("@arangodb/request"); -const response = request.get( - "https://pokeapi.co/api/v2/pokemon/25/" -); -if (response.status < 400) { - const pikachu = response.json; - console.log(pikachu); -} -``` - -{% hint 'warning' %} -Because -[Foxx services are always synchronous](../README.md#compatibility-caveats) -and network requests can be considerably slower than any other -database operation, you should avoid making requests in your service -if possible or use [queues](Scripts.md#queues) instead. 
-{% endhint %} - -By using an absolute path instead of a full URL, you can also use the -request module to talk to ArangoDB itself, -for example in [integration tests](Testing.md#integration-testing): - -```js -const response = request.get("/_db/_system/myfoxx/something"); -``` - -**Note**: Although making local requests doesn't create the network overhead -as making requests to other servers, special care needs to be taken when -talking to services on the same server. If you want to connect services -in the same database [you should use dependencies instead](Dependencies.md). diff --git a/Documentation/Books/Manual/Foxx/Guides/Queries.md b/Documentation/Books/Manual/Foxx/Guides/Queries.md deleted file mode 100644 index db51e3bd8795..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Queries.md +++ /dev/null @@ -1,220 +0,0 @@ -Writing queries -=============== - -ArangoDB provides the `query` template string handler -(or [template tag](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals)) -to make it easy to write and execute [AQL queries](../../../AQL/index.html) -in your Foxx services: - -```js -const { query } = require("@arangodb"); -const max = 13; -const oddNumbers = query` - FOR i IN 1..${max} - FILTER i % 2 == 1 - RETURN i -`.toArray(); -console.log(oddNumbers); // 1,3,5,7,9,11,13 -``` - -Any values passed via interpolation (i.e. using the `${expression}` syntax) -are passed to ArangoDB as -[AQL bind parameters](../../../AQL/Fundamentals/BindParameters.html), -so you don't have to worry about escaping them in order to protect against -injection attacks in user-supplied data. - -The result of the executed query is -[an ArangoDB array cursor](../../../AQL/Invocation/WithArangosh.html#cursors). -You can extract all query results using the `toArray()` method or -step through the result set using the `next()` method. - -You can also consume a cursor with a for-loop: - -```js -const cursor = query` - FOR i IN 1..5 - RETURN i -`; -for (const item of cursor) { - console.log(item); -} -``` - -Using collections ------------------ - -When [working with collections in your service](Collections.md) you generally -want to avoid hardcoding exact collection names. But if you pass a -collection name directly to a query it will be treated as a string: - -```js -// THIS DOES NOT WORK -const users = module.context.collectionName("users"); -// e.g. "myfoxx_users" -const admins = query` - FOR user IN ${users} - FILTER user.isAdmin - RETURN user -`.toArray(); // ERROR -``` - -Instead you need to pass an ArangoDB collection object: - -```js -const users = module.context.collection("users"); -// users is now a collection, not a string -const admins = query` - FOR user IN ${users} - FILTER user.isAdmin - RETURN user -`.toArray(); -``` - -Note that you don't need to use any different syntax to use -a collection in a query, but you do need to make sure the collection is -an actual ArangoDB collection object rather than a plain string. 
- -Low-level access ----------------- - -In addition to the `query` template tag, ArangoDB also provides -the `aql` template tag, which only generates a query object -but doesn't execute it: - -```js -const { db, aql } = require("@arangodb"); -const max = 7; -const query = aql` - FOR i IN 1..${max} - RETURN i -`; -const numbers = db._query(query).toArray(); -``` - -You can also use the `db._query` method to execute queries using -plain strings and passing the bind parameters as an object: - -```js -// Note the lack of a tag, this is a normal string -const query = ` - FOR user IN @@users - FILTER user.isAdmin - RETURN user -`; -const admins = db._query(query, { - // We're passing a string instead of a collection - // because this is an explicit collection bind parameter - // using the AQL double-at notation - "@users": module.context.collectionName("users") -}).toArray(); -``` - -Note that when using plain strings as queries ArangoDB provides -no safeguards to prevent accidental AQL injections: - -```js -// Malicious user input where you might expect a number -const evil = "1 FOR u IN myfoxx_users REMOVE u IN myfoxx_users"; -// DO NOT DO THIS -const numbers = db._query(` - FOR i IN 1..${evil} - RETURN i -`).toArray(); -// Actual query executed by the code: -// FOR i IN i..1 -// FOR u IN myfoxx_users -// REMOVE u IN myfoxx_users -// RETURN i -``` - -If possible, you should always use the `query` or `aql` template tags -rather than passing raw query strings to `db._query` directly. - -AQL fragments -------------- - -If you need to insert AQL snippets dynamically, you can still use -the `query` template tag by using the `aql.literal` helper function to -mark the snippet as a raw AQL fragment: - -```js -const filter = aql.literal( - adminsOnly ? 'FILTER user.isAdmin' : '' -); -const result = query` - FOR user IN ${users} - ${filter} - RETURN user -`.toArray(); -``` - -Both the `query` and `aql` template tags understand fragments marked -with the `aql.literal` helper and inline them directly into the query -instead of converting them to bind parameters. - -Note that because the `aql.literal` helper takes a raw string as argument -the same security implications apply to it as when writing raw AQL queries -using plain strings: - -```js -// Malicious user input where you might expect a condition -const evil = "true REMOVE u IN myfoxx_users"; -// DO NOT DO THIS -const filter = aql.literal(`FILTER ${evil}`); -const result = query` - FOR user IN ${users} - ${filter} - RETURN user -`.toArray(); -// Actual query executed by the code: -// FOR user IN myfoxx_users -// FILTER true -// REMOVE user IN myfoxx_users -// RETURN user -``` - -A typical scenario that might result in an exploit like this is taking -arbitrary strings from a search UI to filter or sort results by a field name. -Make sure to restrict what values you accept. 
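For example, a hedged sketch of a route that only inlines a sort field after checking it against a fixed whitelist (the route, the `users` collection and the field names are made up for illustration):

```js
"use strict";
const { query, aql } = require("@arangodb");
const createRouter = require("@arangodb/foxx/router");
const router = createRouter();
module.context.use(router);
const users = module.context.collection("users");

// Only known field names may be inlined as raw AQL fragments.
const ALLOWED_SORT_FIELDS = ["username", "createdAt"];

router.get("/users", (req, res) => {
  const field = req.queryParams.sortBy || "username";
  if (!ALLOWED_SORT_FIELDS.includes(field)) {
    res.throw("bad request", `Cannot sort by "${field}"`);
  }
  res.json(query`
    FOR user IN ${users}
    SORT user.${aql.literal(field)}
    RETURN user
  `.toArray());
});
```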
- -Managing queries in your service --------------------------------- - -In many cases it may be initially more convenient to perform queries -right where you use their results: - -```js -router.get("/emails", (req, res) => { - res.json(query` - FOR u IN ${users} - FILTER u.active - RETURN u.email - `.toArray()) -}); -``` - -However to [help testability](Testing.md) and make the queries more reusable, -it's often a good idea to move them out of your request handlers -into separate functions, e.g.: - -```js -// in queries/get-user-emails.js -"use strict"; -const { query, aql } = require("@arangodb"); -const users = module.context.collection("users"); -module.exports = (activeOnly = true) => query` - FOR user IN ${users} - ${aql.literal(activeOnly ? "FILTER user.active" : "")} - RETURN user.email -`.toArray(); - -// in your router -const getUserEmails = require("../queries/get-user-emails"); - -router.get("/active-emails", (req, res) => { - res.json(getUserEmails(true)); -}); -router.get("/all-emails", (req, res) => { - res.json(getUserEmails(false)); -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/README.md b/Documentation/Books/Manual/Foxx/Guides/README.md deleted file mode 100644 index c070a7db5f0a..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/README.md +++ /dev/null @@ -1,32 +0,0 @@ -Guides -====== - -These guides provide solutions to common problems when building -applications with Foxx services: - -Up and running --------------- - -- [Working with routers](Routing.md) -- [Working with collections](Collections.md) -- [Writing queries](Queries.md) -- [Development mode](DevelopmentMode.md) -- [Testing Foxx services](Testing.md) -- [Foxx in a cluster](Cluster.md) - -Next steps ----------- - -- [Scripts and scheduling](Scripts.md) -- [Using Node modules](BundledNodeModules.md) -- [Using Webpack with Foxx](Webpack.md) -- [Authentication and sessions](Auth.md) -- [Linking services together](Dependencies.md) - -Advanced topics ---------------- - -- [Working with files](Files.md) -- [Making requests](MakingRequests.md) -- [Access from the browser](Browser.md) -- [Working with 2.x services](LegacyMode.md) diff --git a/Documentation/Books/Manual/Foxx/Guides/Routing.md b/Documentation/Books/Manual/Foxx/Guides/Routing.md deleted file mode 100644 index 5692c5c9641e..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Routing.md +++ /dev/null @@ -1,125 +0,0 @@ -Working with routers -==================== - -In Foxx [routers](../Reference/Routers/README.md) are used to define -the URLs of your API. The easiest way to use a router is to mount it -directly in the service using the [context](../Reference/Context.md): - -```js -const createRouter = require("@arangodb/foxx/router"); -const router = createRouter(); - -module.context.use(router); -``` - -Nested routers --------------- - -Instead of mounting routers where they are defined, routers can also be -exported from one module and imported in another. 
This allows you to -structure your routes by splitting them across multiple files: - -```js -// in your main file -const usersRouter = require("./api/users"); -module.context.use("/users", usersRouter); - -// in api/users/index.js -const createRouter = require("@arangodb/foxx/router"); -const usersRouter = createRouter(); -module.exports = usersRouter; - -usersRouter.get("/me", (req, res) => { - // this will be exposed as /users/me -}); -``` - -You can also mount routers inside of each other: - -```js -// in api/users/friends.js -const createRouter = require("@arangodb/foxx/router"); -const friendsRouter = createRouter(); -module.exports = friendsRouter; - -// in api/users/index.js -const friendsRouter = require("./friends"); -usersRouter.use("/friends", friendsRouter); -``` - -Note that you can also mount several routers with the same prefix -or even without a prefix: - -```js -adminRouter.use(usersAdminRouter); -adminRouter.use(groupsAdminRouter); -``` - -### Local middleware - -Router-level middleware only applies to the router it is applied to and -is not shared between multiple routers mounted at the same prefix -(or without a prefix). - -This can be especially useful when restricting access to -some routes but not others: - -```js -const createRouter = require("@arangodb/foxx/router"); -const publicRoutes = createRouter(); -const authedRoutes = createRouter(); - -authedRoutes.use((req, res, next) => { - if (req.session.uid) { - next(); - } else { - res.throw("unauthorized"); - } -}); - -module.context.use(publicRoutes); -module.context.use(authedRoutes); -``` - -Router factories ----------------- - -Sometimes you may have routers you want to use in multiple projects or -use at multiple points of your API but with slightly different implementations -or using different collections. - -In these cases it can be useful to return the router from a function that -takes these differences as arguments instead of exporting the router directly: - -```js -// in your main file -const createUsersRouter = require("../util/users-router"); -const usersRouter = createUsersRouter( - module.context.collection("users"), - "username" -); -module.context.use(usersRouter); - -// in util/users-router.js -const createRouter = require("@arangodb/foxx/router"); -const { query } = require("@arangodb"); - -module.export = (usersCollection, keyField) => { - const router = createRouter(); - router.use((req, res) => { - if (!req.session || !req.session.uid) { - res.throw("unauthorized"); - } - }); - router.get("/me", (req, res) => { - const user = query` - FOR user IN ${usersCollection} - FILTER user[${keyField}] == ${req.session.uid} - LIMIT 1 - RETURN user - `.next(); - res.json(user); - }); - return router; -}; -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/Scripts.md b/Documentation/Books/Manual/Foxx/Guides/Scripts.md deleted file mode 100644 index 40881c36cb77..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Scripts.md +++ /dev/null @@ -1,157 +0,0 @@ -Scripts and scheduling -====================== - -In addition to the main entry point which defines your service's -[routes](Routing.md) and -[exports](Dependencies.md) you can define scripts -that need to be invoked directly and can be used to implement one-off tasks -or scheduled and recurring jobs using queues. 
- -These scripts can be declared in the `scripts` section of -the [service manifest](../Reference/Manifest.md): - -```json -"scripts": { - "setup": "scripts/setup.js", - "send-mail": "scripts/send-mail.js" -} -``` - -Invoking scripts ----------------- - -Scripts can be invoked manually using -the [web interface](../../Programs/WebInterface/Services.md), -the [Foxx CLI](../../Programs/FoxxCLI/README.md) or -the [Foxx HTTP API](../../../HTTP/Foxx/Miscellaneous.html). - -Additionally the special `setup` and `teardown` lifecycle scripts can -be invoked automatically by Foxx as part of a service's lifecycle (see below). - -Script arguments and return values ----------------------------------- - -When invoking a script any arguments will be exposed to the script as the -`argv` array property of the [service context](../Reference/Context.md). - -Any value exported by the script using `module.exports` will be the script's -return value. Please note that this data will be converted to JSON. - -Any errors raised by the script will be handled depending on how -the script was invoked: - -* if the script was invoked manually (e.g. using the Foxx CLI), it will return - an error response using the exception's `statusCode` property or `500`. - -* if the script was invoked from a Foxx job queue, the job's failure counter - will be incremented and the job will be rescheduled or - marked as failed if no attempts remain. - -**Examples** - -The following script will use its argument to generate a personal greeting: - -```js -'use strict'; -const { argv } = module.context; - -module.exports = `Hello ${argv[0]}!`; -``` - -Lifecycle Scripts ------------------ - -Scripts named `setup` or `teardown` are considered lifecycle scripts and -will (by default) be invoked automatically by Foxx: - -* when a service is installed, upgraded or replaced, the new service's - `setup` script will be executed before it is mounted - -* when a service is removed or replaced, the old service's `teardown` - script will be executed before it is unmounted - -* when a service is upgraded, the old service's `teardown` script *can* - optionally be executed before it is unmounted - -However it's possible to override this behavior as needed. - -Note that in these cases the scripts will always be invoked without arguments -and their exports will be ignored. - -### Setup Script - -The setup script is typically used to create collections a service needs, -to define indexes or to initialize collections with necessary data -like administrative accounts. - -As the setup script may be executed more than once it should be treated -as reentrant: running the setup script again should not result in any errors -or duplicate data: - -```js -const { db } = require("@arangodb"); -const users = module.context.collectionName("users"); - -if (!db._collection(users)) { - // This won't be run if the collection exists - const collection = db._createDocumentCollection(users); - collection.ensureIndex({ - type: "hash", - unique: true, - fields: ["username"] - }); - collection.save({ - username: "admin", - password: auth.create("hunter2") - }); -} -``` - -### Teardown Script - -The teardown script typically removes the collections and/or -documents created by the service's setup script. - -In practice teardown scripts are rarely used due to the risk of -catastrophic data loss when accidentally running the script -while managing the service. 
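If you do write one, a minimal teardown sketch could look like this (assuming the `users` collection created in the setup example above):

```js
"use strict";
const { db } = require("@arangodb");
const users = module.context.collectionName("users");

// Drop the collection created by the setup script, if it still exists.
if (db._collection(users)) {
  db._drop(users);
}
```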
- -Migrations ----------- - -Depending on the amount of data managed by the service and the amount of work -that needs to be done to prepare collections for the service, -running a `setup` script on every upgrade can be very expensive. - -An alternative approach is to perform incremental steps in separate -migration scripts and run them manually after the service is installed. - -A `setup` script should always create all the collections a service uses -but any additional steps like creating indexes, importing data fixtures or -migrating existing data can safely be performed in separate scripts. - -Queues ------- - -Services can schedule scripts of any service mounted in the same database -using [Foxx queues](../Reference/Modules/Queues.md): - -```js -"use strict"; -const queues = require("@arangodb/foxx/queues"); -const queue = queues.get("default"); - -// later -router.post("/signup", (req, res) => { - const user = performSignup(req.body); - // schedule sending welcome e-mail using a script - queue.push( - { - mount: module.context.mount, // i.e. this current service - name: "send-mail" // script name in the service manifest - }, - { to: user.email, body: welcomeEmailText } // arguments - ); -}); -``` - diff --git a/Documentation/Books/Manual/Foxx/Guides/Testing.md b/Documentation/Books/Manual/Foxx/Guides/Testing.md deleted file mode 100644 index b673bb5581a2..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Testing.md +++ /dev/null @@ -1,260 +0,0 @@ -Testing Foxx services -===================== - -Foxx provides out of the box support for running tests against an -installed service using an API similar to -the [Mocha test runner](https://mochajs.org). - -Test files have full access to the [service context](../Reference/Context.md) -and all ArangoDB APIs but can not define Foxx routes. - -Test files can be specified in the [service manifest](../Reference/Manifest.md) -using either explicit paths of each individual file or patterns that can -match multiple files (even if multiple patterns match the same file, -it will only be executed once): - - -```json -{ - "tests": [ - "some-specific-test-file.js", - "test/**/*.js", - "**/*.spec.js", - "**/__tests__/**/*.js" - ] -} -``` - -To run a service's tests you can use -the [web interface](../../Programs/WebInterface/Services.md), -the [Foxx CLI](../../Programs/FoxxCLI/README.md) or -the [Foxx HTTP API](../../../HTTP/Foxx/Miscellaneous.html). -Foxx will execute all test cases in the matching files and -generate a report in the desired format. - -{% hint 'danger' %} -Running tests in a production environment is not recommended and -may result in data loss if the tests involve database access. -{% endhint %} - -Writing tests -------------- - -ArangoDB bundles the [`chai` library](http://www.chaijs.com), -which can be used to define test assertions: - -```js -"use strict"; -const { expect } = require("chai"); - -// later -expect("test".length).to.equal(4); -``` - -Alternatively ArangoDB also provides an implementation of -[Node's `assert` module](https://nodejs.org/api/assert.html): - -```js -"use strict"; -const assert = require("assert"); - -// later -assert.equal("test".length, 4); -``` - -Test cases can be defined in any of the following ways using helper functions -injected by Foxx when executing the test file: - -### Functional style - -Test cases are defined using the `it` function and can be grouped in -test suites using the `describe` function. 
Test suites can use the -`before` and `after` functions to prepare and cleanup the suite and -the `beforeEach` and `afterEach` functions to prepare and cleanup -each test case individually. - -The `it` function also has the aliases `test` and `specify`. - -The `describe` function also has the aliases `suite` and `context`. - -The `before` and `after` functions also have -the aliases `suiteSetup` and `suiteTeardown`. - -The `beforeEach` and `afterEach` functions also have -the aliases `setup` and `teardown`. - -**Note**: These functions are automatically injected into the test file and -don't have to be imported explicitly. The aliases can be used interchangeably. - -```js -"use strict"; -const { expect } = require("chai"); - -test("a single test case", () => { - expect("test".length).to.equal(4); -}); - -describe("a test suite", () => { - before(() => { - // This runs before the suite's first test case - }); - after(() => { - // This runs after the suite's last test case - }); - beforeEach(() => { - // This runs before each test case of the suite - }); - afterEach(() => { - // This runs after each test case of the suite - }); - it("is a test case in the suite", () => { - expect(4).to.be.greaterThan(3); - }); - it("is another test case in the suite", () => { - expect(4).to.be.lessThan(5); - }); -}); - -suite("another test suite", () => { - test("another test case", () => { - expect(4).to.be.a("number"); - }); -}); - -context("yet another suite", () => { - specify("yet another case", () => { - expect(4).to.not.equal(5); - }); -}); -``` - -### Exports style - -Test cases are defined as methods of plain objects assigned to test suite -properties on the `exports` object: - -```js -"use strict"; -const { expect } = require("chai"); - -exports["this is a test suite"] = { - "this is a test case": () => { - expect("test".length).to.equal(4); - } -}; -``` - -Methods named `before`, `after`, `beforeEach` and `afterEach` behave similarly -to the corresponding functions in the functional style described above: - -```js -exports["a test suite"] = { - before: () => { - // This runs before the suite's first test case - }, - after: () => { - // This runs after the suite's last test case - }, - beforeEach: () => { - // This runs before each test case of the suite - }, - afterEach: () => { - // This runs after each test case of the suite - }, - "a test case in the suite": () => { - expect(4).to.be.greaterThan(3); - }, - "another test case in the suite": () => { - expect(4).to.be.lessThan(5); - } -}; -``` - -Unit testing ------------- - -The easiest way to make your Foxx service unit-testable is to extract -critical logic into side-effect-free functions and move these functions into -modules your tests (and router) can require: - -```js -// in your router -const lookupUser = require("../util/users/lookup"); -const verifyCredentials = require("../util/users/verify"); -const users = module.context.collection("users"); - -router.post("/login", function (req, res) { - const { username, password } = req.body; - const user = lookupUser(username, users); - verifyCredentials(user, password); - req.session.uid = user._id; - res.json({ success: true }); -}); - -// in your tests -const verifyCredentials = require("../util/users/verify"); -describe("verifyCredentials", () => { - it("should throw when credentials are invalid", () => { - expect(() => verifyCredentials( - { authData: "whatever" }, - "invalid password" - )).to.throw() - }); -}) -``` - -Integration testing -------------------- - -{% hint 'warning' %} -You 
should avoid running integration tests while a service -is mounted in [development mode](DevelopmentMode.md) as each request -will cause the service to be reloaded. -{% endhint %} - -You can [use the `@arangodb/request` module](MakingRequests.md) -to let tests talk to routes of the same service. - -When the request module is used with a path instead of a full URL, -the path is resolved as relative to the ArangoDB instance. -Using the `baseUrl` property of the [service context](../Reference/Context.md) -we can use this to make requests to the service itself: - -```js -"use strict"; -const { expect } = require("chai"); -const request = require("@arangodb/request"); -const { baseUrl } = module.context; - -describe("this service", () => { - it("should say 'Hello World!' at the index route", () => { - const response = request.get(baseUrl); - expect(response.status).to.equal(200); - expect(response.body).to.equal("Hello World!"); - }); - it("should greet us with name", () => { - const response = request.get(`${baseUrl}/Steve`); - expect(response.status).to.equal(200); - expect(response.body).to.equal("Hello Steve!"); - }); -}); -``` - -An implementation passing the above tests could look like this: - -```js -"use strict"; -const createRouter = require("@arangodb/foxx/router"); -const router = createRouter(); -module.context.use(router); - -router.get((req, res) => { - res.write("Hello World!"); -}) -.response(["text/plain"]); - -router.get("/:name", (req, res) => { - res.write(`Hello ${req.pathParams.name}!`); -}) -.response(["text/plain"]); -``` diff --git a/Documentation/Books/Manual/Foxx/Guides/Webpack.md b/Documentation/Books/Manual/Foxx/Guides/Webpack.md deleted file mode 100644 index c369dcbf90c7..000000000000 --- a/Documentation/Books/Manual/Foxx/Guides/Webpack.md +++ /dev/null @@ -1,101 +0,0 @@ -Using Webpack with Foxx -======================= - -You can use [Webpack](https://webpack.js.org/) to compile your Foxx services -the same way you would compile any other JavaScript code. -However there are a few things you will need to keep in mind. - -Basic configuration -------------------- - -Because the ArangoDB JavaScript environment is largely compatible with Node.js, -the starting point looks fairly similar: - -```js -"use strict"; -module.exports = { - mode: "production", - target: "node", - output: { - libraryTarget: "commonjs2" - }, - externals: [/^@arangodb(\/|$)/] -}; -``` - -The service context -------------------- - -Foxx extends the `module` object with a special `context` property that -reflects the current [service context](../Reference/Context.md). -As Webpack compiles multiple modules into a single file your code will -not be able to access the real `module` object provided by ArangoDB. - -To work around this limitation you can use the `context` provided by the -[`@arangodb/locals` module](../Reference/Modules/README.md#the-arangodblocals-module): - -```js -const { context } = require("@arangodb/locals"); -``` - -This object is identical to `module.context` and can be used as -a drop-in replacement: - -```js -const { context } = require("@arangodb/locals"); -const createRouter = require("@arangodb/foxx/router"); - -const router = createRouter(); -context.use(router); -``` - -Externals ---------- - -By default Webpack will attempt to include any dependency your code imports. -This makes it easy to use third-party modules without worrying about -[filtering `devDependencies`](BundledNodeModules.md) -but causes problems when importing modules provided by ArangoDB. 
- -Most modules that are specific to ArangoDB or Foxx reside in the `@arangodb` -namespace. This makes it fairly straightforward to tell Webpack to ignore -them using the `externals` option: - -```js -module.exports = { - // ... - externals: [/^@arangodb(\/|$)/] -}; -``` - -You can also use this to exclude other modules provided by ArangoDB, -like the `joi` validation library: - -```js -module.exports = { - // ... - externals: [/^@arangodb(\/|$)/, "joi"] -}; -``` - -Compiling scripts ------------------ - -As far as Webpack is concerned, scripts are additional entry points: - -```js -const path = require("path"); -module.exports = { - // ... - context: path.resolve(__dirname, "src"), - entry: { - main: "./index.js", - setup: "./scripts/setup.js" - } -}; -``` - -**Note**: If your scripts are sharing a lot of code with each other or -the rest of the service this can result in some overhead as the shared code -will be included in each output file. A possible solution would be to -extract the shared code into a separe bundle. diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Auth.md b/Documentation/Books/Manual/Foxx/Migrating2x/Auth.md deleted file mode 100644 index 94c100d0bbf7..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Auth.md +++ /dev/null @@ -1,29 +0,0 @@ -Auth and OAuth2 -=============== - -The `util-simple-auth` and `util-oauth2` Foxx services have been replaced with the [Foxx auth](../Reference/Modules/Auth.md) -and [Foxx OAuth2](../Reference/Modules/OAuth2.md) modules. -It is no longer necessary to install these services as dependencies in order to use the functionality. - -Old: - -```js -'use strict'; -const auth = applicationContext.dependencies.simpleAuth; - -// ... - -const valid = auth.verifyPassword(authData, password); -``` - -New: - -```js -'use strict'; -const createAuth = require('@arangodb/foxx/auth'); -const auth = createAuth(); // Use default configuration - -// ... - -const valid = auth.verifyPassword(authData, password); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Context.md b/Documentation/Books/Manual/Foxx/Migrating2x/Context.md deleted file mode 100644 index 9ae07b09de53..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Context.md +++ /dev/null @@ -1,18 +0,0 @@ -The application context -======================= - -The global `applicationContext` variable available in Foxx modules has been replaced with the `context` attribute of the `module` variable. For consistency it is now referred to as the *service* context throughout this documentation. - -Some methods of the service context have changed in ArangoDB 3.0: - -* `fileName()` now behaves like `path()` did in ArangoDB 2.x -* `path()` has been removed (use `fileName()` instead) -* `foxxFileName()` has been removed (use `fileName()` instead) - -Additionally the `version` and `name` attributes have been removed and can now only be accessed via the `manifest` attribute (as `manifest.version` and `manifest.name`). Note that the corresponding manifest fields are now optional and may be omitted. - -The `options` attribute has also been removed as it should be considered an implementation detail. You should instead access the `dependencies` and `configuration` attributes directly. - -The internal `_prefix` attribute (which was an alias for `basePath`) and the internal `comment` and `clearComments` methods (which were used by the magical documentation comments in ArangoDB 2.x) have also been removed. 
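As a minimal sketch of the context changes listed above (assuming a service whose manifest declares a `version`; the file name `data.json` is only an illustrative placeholder):

Old:

```js
'use strict';
// ArangoDB 2.x: metadata and file paths were read straight off applicationContext
const version = applicationContext.version;
const dataFile = applicationContext.path('data.json');
```

New:

```js
'use strict';
// ArangoDB 3.0: metadata lives on module.context.manifest,
// and fileName() takes over the role of the removed path()
const version = module.context.manifest.version;
const dataFile = module.context.fileName('data.json');
```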
- -The internal `_service` attribute (which provides access to the service itself) has been renamed to `service`. diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Endpoints.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Endpoints.md deleted file mode 100644 index 5612edc6e943..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Endpoints.md +++ /dev/null @@ -1,38 +0,0 @@ -The request context -=================== - -When defining a route on a controller the controller would return an object called *request context*. Routers return a similar object called *endpoint*. Routers also return endpoints when mounting child routers or middleware, as does the `use` method of the service context. - -The main differences between the new endpoints and the objects returned by controllers in previous versions of ArangoDB are: - -* `bodyParam` is now simply called `body`; it is no longer neccessary or possible to give the body a name and the request body will not show up in the request parameters. It's also possible to specify a MIME type - -* `body`, `queryParam` and `pathParam` now take position arguments instead of an object. For specifics see the [endpoint documentation](../../Reference/Routers/Endpoints.md). - -* `notes` is now called `description` and takes a single string argument. - -* `onlyIf` and `onlyIfAuthenticated` are no longer available; they can be emulated with middleware if necessary: - -Old: - -```js -ctrl.get(/* ... */) -.onlyIf(function (req) { - if (!req.user) { - throw new Error('Not authenticated!'); - } -}); -``` - -New: - -```js -router.use(function (req, res, next) { - if (!req.arangoUser) { - res.throw(403, 'Not authenticated!'); - } - next(); -}); - -router.get(/* ... */); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Errors.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Errors.md deleted file mode 100644 index d8c3cf49db5d..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Errors.md +++ /dev/null @@ -1,33 +0,0 @@ -Error handling -============== - -The `errorResponse` method provided by controller request contexts has no equivalent in router endpoints. If you want to handle specific error types with specific status codes you need to catch them explicitly, either in the route or in a middleware: - -Old: - -```js -ctrl.get('/puppies', function (req, res) { - // Exception is thrown here -}) -.errorResponse(TooManyPuppiesError, 400, 'Something went wrong!'); -``` - -New: - -```js -ctrl.get('/puppies', function (req, res) { - try { - // Exception is thrown here - } catch (e) { - if (!(e instanceof TooManyPuppiesError)) { - throw e; - } - res.throw(400, 'Something went wrong!'); - } -}) -// The "error" method merely documents the meaning -// of the status code and has no other effect. -.error(400, 'Thrown if there are too many puppies.'); -``` - -Note that errors created with `http-errors` are still handled by Foxx intelligently. In fact `res.throw` is just a helper method for creating and throwing these errors. diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/IoC.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/IoC.md deleted file mode 100644 index a32c21148a01..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/IoC.md +++ /dev/null @@ -1,51 +0,0 @@ -Dependency injection -==================== - -There is no equivalent of the `addInjector` method available in ArangoDB 2.x controllers. 
Most use cases can be solved by simply using plain variables but if you need something more flexible you can also use middleware: - -Old: - -```js -ctrl.addInjector('magicNumber', function () { - return Math.random(); -}); - -ctrl.get('/', function (req, res, injected) { - res.json(injected.magicNumber); -}); -``` - -New: - -```js -function magicMiddleware(name) { - return { - register () { - let magic; - return function (req, res, next) { - if (!magic) { - magic = Math.random(); - } - req[name] = magic; - next(); - }; - } - }; -} - -router.use(magicMiddleware('magicNumber')); - -router.get('/', function (req, res) { - res.json(req.magicNumber); -}); -``` - -Or simply: - -```js -const magicNumber = Math.random(); - -router.get('/', function (req, res) { - res.json(magicNumber); -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Middleware.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Middleware.md deleted file mode 100644 index 02cfe48f3ae5..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Middleware.md +++ /dev/null @@ -1,28 +0,0 @@ -Before, after and around -======================== - -The `before`, `after` and `around` methods can easily be replaced by middleware: - -Old: - -```js -let start; -ctrl.before(function (req, res) { - start = Date.now(); -}); -ctrl.after(function (req, res) { - console.log('Request handled in ', (Date.now() - start), 'ms'); -}); -``` - -New: - -```js -router.use(function (req, res, next) { - let start = Date.now(); - next(); - console.log('Request handled in ', (Date.now() - start), 'ms'); -}); -``` - -Note that unlike `around` middleware receives the `next` function as the *third* argument (the "opts" argument has no equivalent). diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/README.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/README.md deleted file mode 100644 index edc972577d33..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/README.md +++ /dev/null @@ -1,49 +0,0 @@ -Controllers vs routers -====================== - -Foxx Controllers have been replaced with [routers](../../Reference/Routers/README.md). This is more than a cosmetic change as there are significant differences in behaviour: - -Controllers were automatically mounted when the file defining them was executed. Routers need to be explicitly mounted using the `module.context.use` method. Routers can also be exported, imported and even nested. This makes it easier to split up complex routing trees across multiple files. - -Old: - -```js -'use strict'; -const Foxx = require('org/arangodb/foxx'); -const ctrl = new Foxx.Controller(applicationContext); - -ctrl.get('/hello', function (req, res) { - // ... -}); -``` - -New: - -```js -'use strict'; -const createRouter = require('org/arangodb/foxx/router'); -const router = createRouter(); -// If you are importing this file from your entry file ("main"): -module.exports = router; -// Otherwise: module.context.use(router); - -router.get('/hello', function (req, res) { - // ... -}); -``` - -Some general changes in behaviour that might trip you up: - -* When specifying path parameters with schemas Foxx will now ignore the route if the schema does not match (i.e. `/hello/foxx` will no longer match `/hello/:num` if `num` specifies a schema that doesn't match the value `"foxx"`). With controllers this could previously result in users seeing a 400 (bad request) error when they should instead be served a 404 (not found) response. 
-
-* When a request is made with an HTTP verb not supported by an endpoint, Foxx will now respond with a 405 (method not allowed) error with an appropriate `Allowed` header listing the supported HTTP verbs for that endpoint.
-
-* Foxx will no longer parse your JSDoc comments to generate route documentation (use the `summary` and `description` methods of the endpoint instead).
-
-* The `apiDocumentation` method now lives on the service context and behaves slightly differently.
-
-* There is no router equivalent for the `activateAuthentication` and `activateSessions` methods. Instead you should use the session middleware (see the section on sessions below).
-
-* There is no `del` alias for the `delete` method on routers. It has always been safe to use keywords as method names in Foxx, so the use of this alias was already discouraged before.
-
-* The `allRoutes` proxy is no longer available on routers but can easily be replaced with middleware or child routers.
diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Request.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Request.md
deleted file mode 100644
index 068f46c846d7..000000000000
--- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Request.md
+++ /dev/null
@@ -1,116 +0,0 @@
-Request objects
-===============
-
-The names of some attributes of the request object have been adjusted to more closely align with those of the corresponding methods on the endpoint objects and established conventions in other JavaScript frameworks:
-
-* `req.urlParameters` is now called `req.pathParams`
-
-* `req.parameters` is now called `req.queryParams`
-
-* `req.params()` is now called `req.param()`
-
-* `req.requestType` is now called `req.method`
-
-* `req.compatibility` is now called `req.arangoVersion`
-
-* `req.user` is now called `req.arangoUser`
-
-Some attributes have been removed or changed:
-
-* `req.cookies` has been removed entirely (use `req.cookie(name)`)
-
-* `req.requestBody` has been removed entirely (see below)
-
-* `req.suffix` is now a string rather than an array
-
-Additionally the `req.server` and `req.client` attributes are no longer available. The information is now exposed in a way that can (optionally) transparently handle proxy forwarding headers:
-
-* `req.hostname` defaults to `req.server.address`
-
-* `req.port` defaults to `req.server.port`
-
-* `req.remoteAddress` defaults to `client.address`
-
-* `req.remotePort` defaults to `client.port`
-
-Finally, the `req.cookie` method now takes the `signed` options directly.
-
-Old:
-
-```js
-const sid = req.cookie('sid', {
-  signed: {
-    secret: 'keyboardcat',
-    algorithm: 'sha256'
-  }
-});
-```
-
-New:
-
-```js
-const sid = req.cookie('sid', {
-  secret: 'keyboardcat',
-  algorithm: 'sha256'
-});
-```
-
-Request bodies
---------------
-
-The `req.body` is no longer a method and no longer automatically parses JSON request bodies unless a request body was defined. The `req.rawBody` now corresponds to the `req.rawBodyBuffer` of ArangoDB 2.x and is also no longer a method.
-
-Old:
-
-```js
-ctrl.post('/', function (req, res) {
-  const data = req.body();
-  // ...
-});
-```
-
-New:
-
-```js
-router.post('/', function (req, res) {
-  const data = req.body;
-  // ...
-})
-.body(['json']);
-```
-
-Or simply:
-
-```js
-const joi = require('joi');
-router.post('/', function (req, res) {
-  const data = req.body;
-  // ...
-})
-.body(joi.object().optional());
-```
-
-Multipart requests
-------------------
-
-The `req.requestParts` method has been removed entirely.
If you need to accept multipart request bodies, you can simply define the request body using a multipart MIME type like `multipart/form-data`: - -Old: - -```js -ctrl.post('/', function (req, res) { - const parts = req.requestParts(); - // ... -}); -``` - -New: - -```js -router.post('/', function (req, res) { - const parts = req.body; - // ... -}) -.body(['multipart/form-data']); -``` - diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Response.md b/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Response.md deleted file mode 100644 index 36a9d1ad0142..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Controllers/Response.md +++ /dev/null @@ -1,56 +0,0 @@ -Response objects -================ - -The response object has a lot of new methods in ArangoDB 3.0 but otherwise remains similar to the response object of previous versions: - -The `res.send` method behaves very differently from how the method with the same name behaved in ArangoDB 2.x: the conversion now takes the response body definition of the route into account. There is a new method `res.write` that implements the old behaviour. - -Note that consecutive calls to `res.write` will append to the response body rather than replacing it like `res.send`. - -The `res.contentType` property is also no longer available. If you want to set the MIME type of the response body to an explicit value you should set the `content-type` header instead: - -Old: - -```js -res.contentType = 'application/json'; -res.body = JSON.stringify(results); -``` - -New: - -```js -res.set('content-type', 'application/json'); -res.body = JSON.stringify(results); -``` - -Or simply: - -```js -// sets the content type to JSON -// if it has not already been set -res.json(results); -``` - -The `res.cookie` method now takes the `signed` options as part of the regular options object. - -Old: - -```js -res.cookie('sid', 'abcdef', { - ttl: 60 * 60, - signed: { - secret: 'keyboardcat', - algorithm: 'sha256' - } -}); -``` - -New: - -```js -res.cookie('sid', 'abcdef', { - ttl: 60 * 60, - secret: 'keyboardcat', - algorithm: 'sha256' -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Manifest.md b/Documentation/Books/Manual/Foxx/Migrating2x/Manifest.md deleted file mode 100644 index 09e84f03e38a..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Manifest.md +++ /dev/null @@ -1,144 +0,0 @@ -Manifest -======== - -Many of the fields that were required in ArangoDB 2.x are now optional and can be safely omitted. - -To avoid compatibility problems with future versions of ArangoDB you should always specify the `engines` field, e.g.: - -```json -{ - "engines": { - "arangodb": "^3.0.0" - } -} -``` - -Controllers & exports ---------------------- - -Previously Foxx distinguished between `exports` and `controllers`, each of which could be specified as an object. In ArangoDB 3.0 these have been merged into a single `main` field specifying an entry file. 
- -The easiest way to migrate services using multiple exports and/or controllers is to create a separate entry file that imports these files: - -Old (manifest.json): - -```json -{ - "exports": { - "doodads": "doodads.js", - "dingbats": "dingbats.js" - }, - "controllers": { - "/doodads": "routes/doodads.js", - "/dingbats": "routes/dingbats.js", - "/": "routes/root.js" - } -} -``` - -New (manifest.json): - -```json -{ - "main": "index.js" -} -``` - -New (index.js): - -```js -'use strict'; -module.context.use('/doodads', require('./routes/doodads')); -module.context.use('/dingbats', require('./routes/dingbats')); -module.context.use('/', require('./routes/root')); -module.exports = { - doodads: require('./doodads'), - dingbats: require('./dingbats') -}; -``` - -Index redirect --------------- - -If you previously did not define the `defaultDocument` field, please note that in ArangoDB 3.0 the field will no longer default to the value `index.html` when omitted: - -Old: - -```json -{ - // no defaultDocument -} -``` - -New: - -```json -{ - "defaultDocument": "index.html" -} -``` - -This also means it is no longer necessary to specify the `defaultDocument` field with an empty value to prevent the redirect and be able to serve requests at the `/` (root) path of the mount point: - -Old: - -```json -{ - "defaultDocument": "" -} -``` - -New: - -```json -{ - // no defaultDocument -} -``` - -Assets ------- - -The `assets` field is no longer supported in ArangoDB 3.0 outside of legacy compatibility mode. - -If you previously used the field to serve individual files as-is you can simply use the `files` field instead: - -Old: - -```json -{ - "assets": { - "client.js": { - "files": ["assets/client.js"], - "contentType": "application/javascript" - } - } -} -``` - -New: - -```json -{ - "files": { - "client.js": { - "path": "assets/client.js", - "type": "application/javascript" - } - } -} -``` - -If you relied on being able to specify multiple files that should be concatenated, you will have to use build tools outside of ArangoDB to prepare these files accordingly. - -Root element ------------- - -The `rootElement` field is no longer supported and has been removed entirely. - -If your controllers relied on this field being available you need to adjust your schemas and routes to be able to handle the full JSON structure of incoming documents. - -System services ---------------- - -The `isSystem` field is no longer supported. The presence or absence of the field had no effect in most recent versions of ArangoDB 2.x and has now been removed entirely. diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Queries.md b/Documentation/Books/Manual/Foxx/Migrating2x/Queries.md deleted file mode 100644 index adf6f59d0b42..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Queries.md +++ /dev/null @@ -1,47 +0,0 @@ -Foxx queries -============ - -The `createQuery` method has been removed. It can be trivially replaced with plain JavaScript functions and direct calls to [the `db._query` method](../Reference/Modules/README.md): - -Old: - -```js -'use strict'; -const Foxx = require('org/arangodb/foxx'); -const query = Foxx.createQuery({ - query: 'FOR u IN _users SORT u.user ASC RETURN u[@propName]', - params: ['propName'], - transform: function (results, uppercase) { - return ( - uppercase - ? 
results[0].toUpperCase() - : results[0].toLowerCase() - ); - } -}); - -query('user', true); -``` - -New: - -```js -'use strict'; -const db = require('@arangodb').db; -const aql = require('@arangodb').aql; - -function query(propName, uppercase) { - const results = db._query(aql` - FOR u IN _users - SORT u.user ASC - RETURN u[${propName}] - `); - return ( - uppercase - ? results[0].toUpperCase() - : results[0].toLowerCase() - ); -} - -query('user', true); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/README.md b/Documentation/Books/Manual/Foxx/Migrating2x/README.md deleted file mode 100644 index 09e520281648..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Migrating 2.x services to 3.0 -============================= - -When migrating services from older versions of ArangoDB it is generally recommended you make sure they work in [legacy compatibility mode](../Guides/LegacyMode.md), which can also serve as a stop-gap solution. - -This chapter outlines the major differences in the Foxx API between ArangoDB 2.8 and ArangoDB 3.0. - -General changes ---------------- - -The `console` object in later versions of ArangoDB 2.x implemented a special Foxx console API and would optionally log messages to a collection. ArangoDB 3.0 restores the original behavior where `console` is the same object available from the [console module](../../Appendix/JavaScriptModules/Console.md). diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Repositories.md b/Documentation/Books/Manual/Foxx/Migrating2x/Repositories.md deleted file mode 100644 index 7a7bbf002a21..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Repositories.md +++ /dev/null @@ -1,204 +0,0 @@ -Repositories and models -======================= - -Previously Foxx was heavily built around the concept of repositories and models, which provided complex but rarely necessary abstractions on top of ArangoDB collections and documents. In ArangoDB 3.0 these have been removed entirely. - -Repositories vs collections ---------------------------- - -Repositories mostly wrapped methods that already existed on ArangoDB collection objects and primarily dealt with converting between plain ArangoDB documents and Foxx model instances. In ArangoDB 3.0 you can simply use these collections directly and treat documents as plain JavaScript objects. - -Old: - -```js -'use strict'; -const Foxx = require('org/arangodb/foxx'); -const myRepo = new Foxx.Repository( - applicationContext.collection('myCollection'), - {model: Foxx.Model} -); - -// ... - -const models = myRepo.byExample({color: 'green'}); -res.json(models.map(function (model) { - return model.forClient(); -})); -``` - -New: - -```js -'use strict'; -const myDocs = module.context.collection('myCollection'); - -// ... - -const docs = myDocs.byExample({color: 'green'}); -res.json(docs); -``` - -Schema validation ------------------ - -The main purpose of models in ArangoDB 2.x was to validate incoming data using joi schemas. In more recent versions of ArangoDB 2.x it was already possible to pass these schemas directly in most places where a model was expected as an argument. The only difference is that schemas should now be considered the default. - -If you previously relied on the automatic validation of Foxx model instances when setting attributes or instantiating models from untrusted data, you can simply use the schema's `validate` method directly. 
- -Old: - -```js -'use strict'; -const joi = require('joi'); -const mySchema = { - name: joi.string().required(), - size: joi.number().required() -}; -const Foxx = require('org/arangodb/foxx'); -const MyModel = Foxx.Model.extend({schema: mySchema}); - -// ... - -const model = new MyModel(req.json()); -if (!model.isValid) { - res.status(400); - res.write('Bad request'); - return; -} -``` - -New: - -```js -'use strict'; -const joi = require('joi'); -// Note this is now wrapped in a joi.object() -const mySchema = joi.object({ - name: joi.string().required(), - size: joi.number().required() -}).required(); - -// ... - -const result = mySchema.validate(req.body); -if (result.errors) { - res.status(400); - res.write('Bad request'); - return; -} -``` - -Migrating models ----------------- - -While most use cases for models can now be replaced with plain joi schemas, there is still the concept of a "model" in Foxx in ArangoDB 3.0 although it is quite different from Foxx models in ArangoDB 2.x. - -A model in Foxx now refers to a plain JavaScript object with an optional `schema` attribute and the optional methods `forClient` and `fromClient`. Models can be used instead of plain joi schemas to define request and response bodies but there are no model "instances" in ArangoDB 3.0. - -Old: - -```js -'use strict'; -const _ = require('underscore'); -const joi = require('joi'); -const Foxx = require('org/arangodb/foxx'); -const MyModel = Foxx.Model.extend({ - schema: { - name: joi.string().required(), - size: joi.number().required() - }, - forClient () { - return _.omit(this.attributes, ['_key', '_id', '_rev']); - } -}); - -// ... - -ctrl.get(/* ... */) -.bodyParam('body', {type: MyModel}); -``` - -New: - -```js -'use strict'; -const _ = require('lodash'); -const joi = require('joi'); -const MyModel = { - schema: joi.object({ - name: joi.string().required(), - size: joi.number().required() - }).required(), - forClient (data) { - return _.omit(data, ['_key', '_id', '_rev']); - } -}; - -// ... - -router.get(/* ... */) -.body(MyModel); -``` - -Triggers --------- - -When saving, updating, replacing or deleting models in ArangoDB 2.x using the repository methods the repository and model would fire events that could be subscribed to in order to perform side-effects. - -Note that even in 2.x these events would not fire when using queries or manipulating documents in any other way than using the specific repository methods that operated on individual documents. - -This behaviour is no longer available in ArangoDB 3.0 but can be emulated by using an `EventEmitter` directly if it is not possible to solve the problem differently: - -Old: - -```js -'use strict'; -const Foxx = require('org/arangodb/foxx'); -const MyModel = Foxx.Model.extend({ - // ... -}, { - afterRemove () { - console.log(this.get('name'), 'was removed'); - } -}); - -// ... - -const model = myRepo.firstExample({name: 'myName'}); -myRepo.remove(model); -// -> "myName was removed successfully" -``` - -New: - -```js -'use strict'; -const EventEmitter = require('events'); -const emitter = new EventEmitter(); -emitter.on('afterRemove', function (doc) { - console.log(doc.name, 'was removed'); -}); - -// ... - -const doc = myDocs.firstExample({name: 'myName'}); -myDocs.remove(doc); -emitter.emit('afterRemove', doc); -// -> "myName was removed successfully" -``` - -Or simply: - -```js -'use strict'; -function afterRemove(doc) { - console.log(doc.name, 'was removed'); -} - -// ... 
- -const doc = myDocs.firstExample({name: 'myName'}); -myDocs.remove(doc); -afterRemove(doc); -// -> "myName was removed successfully" -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Sessions.md b/Documentation/Books/Manual/Foxx/Migrating2x/Sessions.md deleted file mode 100644 index 7321730cdd3d..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Sessions.md +++ /dev/null @@ -1,35 +0,0 @@ -Sessions -======== - -The `ctrl.activateSessions` method and the related `util-sessions-local` Foxx service have been replaced with the [Foxx sessions](../Reference/Sessions/README.md) middleware. It is no longer possible to use the built-in session storage but you can simply pass in any document collection directly. - -Old: - -```js -const localSessions = applicationContext.dependencies.localSessions; -const sessionStorage = localSessions.sessionStorage; -ctrl.activateSessions({ - sessionStorage: sessionStorage, - cookie: {secret: 'keyboardcat'} -}); - -ctrl.destroySession('/logout', function (req, res) { - res.json({message: 'Goodbye!'}); -}); -``` - -New: - -```js -const sessionMiddleware = require('@arangodb/foxx/sessions'); -const cookieTransport = require('@arangodb/foxx/sessions/transports/cookie'); -router.use(sessionMiddleware({ - storage: module.context.collection('sessions'), - transport: cookieTransport('keyboardcat') -})); - -router.post('/logout', function (req, res) { - req.sessionStorage.clear(req.session); - res.json({message: 'Goodbye!'}); -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Migrating2x/Wayback.md b/Documentation/Books/Manual/Foxx/Migrating2x/Wayback.md deleted file mode 100644 index 762f73bd7173..000000000000 --- a/Documentation/Books/Manual/Foxx/Migrating2x/Wayback.md +++ /dev/null @@ -1,56 +0,0 @@ -Migrating from pre-2.8 -====================== - -When migrating from a version older than ArangoDB 2.8 please note that starting with ArangoDB 2.8 the behaviour of the `require` function more closely mimics the behaviour observed in Node.js and module bundlers for browsers, e.g.: - -In a file `/routes/examples.js` (relative to the root folder of the service): - -* `require('./my-module')` will be attempted to be resolved in the following order: - - 1. `/routes/my-module` (relative to service root) - 2. `/routes/my-module.js` (relative to service root) - 3. `/routes/my-module.json` (relative to service root) - 4. `/routes/my-module/index.js` (relative to service root) - 5. `/routes/my-module/index.json` (relative to service root) - -* `require('lodash')` will be attempted to be resolved in the following order: - - 1. `/routes/node_modules/lodash` (relative to service root) - 2. `/node_modules/lodash` (relative to service root) - 3. ArangoDB module `lodash` - 4. Node compatibility module `lodash` - 5. Bundled NPM module `lodash` - -* `require('/abs/path')` will be attempted to be resolved in the following order: - - 1. `/abs/path` (relative to file system root) - 2. `/abs/path.js` (relative to file system root) - 3. `/abs/path.json` (relative to file system root) - 4. `/abs/path/index.js` (relative to file system root) - 5. `/abs/path/index.json` (relative to file system root) - -This behaviour is incompatible with the source code generated by the Foxx generator in the web interface before ArangoDB 2.8. - -**Note:** The `org/arangodb` module is aliased to the new name `@arangodb` in ArangoDB 3.0.0 and the `@arangodb` module was aliased to the old name `org/arangodb` in ArangoDB 2.8.0. 
Either one will work in 2.8 and 3.0 but outside of legacy services you should use `@arangodb` going forward. - -Foxx queue ----------- - -In ArangoDB 2.6 Foxx introduced a new way to define queued jobs using Foxx scripts to replace the function-based job type definitions which were causing problems when restarting the server. The function-based jobs have been removed in 2.7 and are no longer supported at all. - -CoffeeScript ------------- - -ArangoDB 3.0 no longer provides built-in support for CoffeeScript source files, even in legacy compatibility mode. If you want to use an alternative language like CoffeeScript, make sure to pre-compile the raw source files to JavaScript and use the compiled JavaScript files in the service. - -The request module ------------------- - -The `@arangodb/request` module when used with the `json` option previously overwrote the string in the `body` property of the response object of the response with the parsed JSON body. In 2.8 this was changed so the parsed JSON body is added as the `json` property of the response object in addition to overwriting the `body` property. In 3.0 and later (including legacy compatibility mode) the `body` property is no longer overwritten and must use the `json` property instead. Note that this only affects code using the `json` option when making the request. - -Bundled NPM modules -------------------- - -The bundled NPM modules have been upgraded and may include backwards-incompatible changes, especially the API of `joi` has changed several times. If in doubt you should bundle your own versions of these modules to ensure specific versions will be used. - -The utility module `lodash` is now available and should be used instead of `underscore`, but both modules will continue to be provided. diff --git a/Documentation/Books/Manual/Foxx/README.md b/Documentation/Books/Manual/Foxx/README.md deleted file mode 100644 index ab849f181c02..000000000000 --- a/Documentation/Books/Manual/Foxx/README.md +++ /dev/null @@ -1,69 +0,0 @@ -Foxx Microservices -================== - -Traditionally, server-side projects have been developed as standalone applications -that guide the communication between the client-side frontend and the database -backend. This has led to applications that were either developed as single -monoliths or that duplicated data access and domain logic across all services -that had to access the database. Additionally, tools to abstract away the -underlying database calls could incur a lot of network overhead when using remote -databases without careful optimization. - -ArangoDB allows application developers to write their data access and domain logic -as microservices running directly within the database with native access to -in-memory data. The **Foxx microservice framework** makes it easy to extend -ArangoDB's own REST API with custom HTTP endpoints using modern JavaScript running -on the same V8 engine you know from Node.js and the Google Chrome web browser. - -Unlike traditional approaches to storing logic in the database (like stored -procedures), these microservices can be written as regular structured JavaScript -applications that can be easily distributed and version controlled. Depending on -your project's needs Foxx can be used to build anything from optimized REST -endpoints performing complex data access to entire standalone applications -running directly inside the database. - -How it works ------------- - -Foxx services consist of JavaScript code running in the V8 JavaScript runtime -embedded inside ArangoDB. 
Each service is mounted in each available V8 context -(the number of contexts can be adjusted in the server configuration). -Incoming requests are distributed across these contexts automatically. - -If you're coming from another JavaScript environment like Node.js this is -similar to running multiple Node.js processes behind a load balancer: -you should not rely on server-side state (other than the database itself) -between different requests as there is no way of making sure consecutive -requests will be handled in the same context. - -Because the JavaScript code is running inside the database another difference -is that all Foxx and ArangoDB APIs are purely synchronous and should be -considered blocking. This is especially important for transactions, -which in ArangoDB can execute arbitrary code but may have to lock -entire collections (effectively preventing any data to be written) -until the code has completed. - -Compatibility caveats ---------------------- - -Unlike JavaScript in browsers or Node.js, the JavaScript environment -in ArangoDB is synchronous. This means any code that depends on asynchronous -behavior like promises or `setTimeout` will not behave correctly in -ArangoDB or Foxx. Additionally, ArangoDB does not support native extensions -unlike Node.js. All code has to be implemented in pure JavaScript. - -While ArangoDB provides a lot of compatibility code to support code written -for Node.js, some Node.js built-in modules can not be provided by ArangoDB. -For a closer look at the Node.js modules ArangoDB does or -does not provide check out -the [appendix on JavaScript modules](../Appendix/JavaScriptModules/README.md). - -When using [bundled node modules](Guides/BundledNodeModules.md) keep in mind -that these restrictions not only apply to the modules themselves but also -the node dependencies of those modules. As a rule of thumb: - -- Modules written to work in Node.js and the browser that do not - rely on async behavior should generally work - -- Modules that rely on network or filesystem I/O or make heavy use - of async behavior most likely will not diff --git a/Documentation/Books/Manual/Foxx/Reference/Configuration.md b/Documentation/Books/Manual/Foxx/Reference/Configuration.md deleted file mode 100644 index 8c49ad14d907..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Configuration.md +++ /dev/null @@ -1,77 +0,0 @@ -Foxx configuration -================== - -Foxx services can define configuration parameters -[in the service manifest](Manifest.md) to make them more re-usable. - -The `configuration` object maps names to configuration parameters: - -* The key is the name under which the parameter will be available on the - [service context's](Context.md) `configuration` property. - -* The value is a parameter definition. - -The key should be a valid identifier following the case-insensitive format -`/^[_$a-z][-_$a-z0-9]*$/`. - -The parameter definition can have the following properties: - -* **description**: `string` - - Human readable description of the parameter. - -* **type**: `string` (Default: `"string"`) - - Type of the configuration parameter. Supported values are: - - * `"integer"` or `"int"`: - any finite integer number. - - * `"boolean"` or `"bool"`: - the values `true` or `false`. - - * `"number"`: - any finite decimal or integer number. - - * `"string"`: - any string value. - - * `"json"`: - any well-formed JSON value. - - * `"password"`: - like *string* but will be displayed as a masked input field in the web frontend. 
- -* **default**: `any` - - Default value of the configuration parameter. - -* **required**: (Default: `true`) - - Whether the parameter is required. - -If the configuration has parameters that do not specify a default value, you -need to configure the service before it becomes active. In the meantime a -fallback service will be mounted that responds to all requests with a HTTP 500 -status code indicating a server-side error. - -The configuration parameters of a mounted service can be adjusted from the -web interface by clicking the *Configuration* button in the service details. - - - -**Examples** - -```json -"configuration": { - "currency": { - "description": "Currency symbol to use for prices in the shop.", - "default": "$", - "type": "string" - }, - "secretKey": { - "description": "Secret key to use for signing session tokens.", - "type": "password" - } -} -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Context.md b/Documentation/Books/Manual/Foxx/Reference/Context.md deleted file mode 100644 index 0b24aab2c8c2..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Context.md +++ /dev/null @@ -1,301 +0,0 @@ -Foxx service context -==================== - -The service context provides access to methods and attributes that are specific -to a given service. In a Foxx service the context is generally available as the -`module.context` variable. Within a router's request handler the request and -response objects' `context` attribute also provide access to the context of the -service the route was mounted in (which may be different from the one the route -handler was defined in). - -**Examples** - -```js -// in service /my-foxx-1 -const createRouter = require('@arangodb/foxx/router'); -const router = createRouter(); - -// See the chapter on dependencies for more info on -// how exports and dependencies work across services -module.exports = {routes: router}; - -router.get(function (req, res) { - module.context.mount === '/my-foxx-1'; - req.context.mount === '/my-foxx-2'; - res.write('Hello from my-foxx-1'); -}); - -// in service /my-foxx-2 -const createRouter = require('@arangodb/foxx/router'); -const router2 = createRouter(); - -module.context.use(router2); - -router2.post(function (req, res) { - module.context.mount === '/my-foxx-2'; - req.context.mount === '/my-foxx-2'; - res.write('Hello from my-foxx-2'); -}); - -const router1 = module.context.dependencies.myFoxx1.routes; -module.context.use(router1); -``` - -The service context specifies the following properties: - -* **argv**: `any` - - Any arguments passed in if the current file was executed as a - [script or queued job](../Guides/Scripts.md). - -* **basePath**: `string` - - The file system path of the service, i.e. the folder in which the service - was installed to by ArangoDB. - -* **baseUrl**: `string` - - The base URL of the service, relative to the ArangoDB server, - e.g. `/_db/_system/my-foxx`. - -* **collectionPrefix**: `string` - - The prefix that will be used by *collection* and *collectionName* to derive - the names of service-specific collections. This is derived from the - service's mount point, e.g. `/my-foxx` becomes `my_foxx`. - -* **configuration**: `Object` - - [Configuration options](Configuration.md) for the service. - -* **dependencies**: `Object` - - Configured [dependencies](../Guides/Dependencies.md) for the service. - -* **isDevelopment**: `boolean` - - Indicates whether the service is running in [development mode](../README.md). 
- -* **isProduction**: `boolean` - - The inverse of *isDevelopment*. - -* **manifest**: `Object` - - The parsed [manifest file](Manifest.md) of the service. - -* **mount**: `string` - - The mount point of the service, e.g. `/my-foxx`. - -apiDocumentation ----------------- - -`module.context.apiDocumentation([options]): Function` - -**DEPRECATED** - -Creates a request handler that serves the API documentation. - -**Note**: This method has been deprecated in ArangoDB 3.1 and replaced with -the more straightforward `createDocumentationRouter` method providing the -same functionality. - -**Arguments** - -See `createDocumentationRouter` below. - -**Examples** - -```js -// Serve the API docs for the current service -router.get('/docs/*', module.context.apiDocumentation()); - -// Note that the path must end with a wildcard -// and the route must use HTTP GET. -``` - -createDocumentationRouter -------------------------- - -`module.context.createDocumentationRouter([options]): Router` - -Creates a router that serves the API documentation. - -**Note**: The router can be mounted like any other child router -(see examples below). - -**Arguments** - -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **mount**: `string` (Default: `module.context.mount`) - - The mount path of the service to serve the documentation of. - - * **indexFile**: `string` (Default: `"index.html"`) - - File name of the HTML file serving the API documentation. - - * **swaggerRoot**: `string` (optional) - - Full path of the folder containing the Swagger assets and the *indexFile*. - Defaults to the Swagger assets used by the web interface. - - * **before**: `Function` (optional) - - A function that will be executed before a request is handled. - - If the function returns `false` the request will not be processed any further. - - If the function returns an object, its attributes will be used to override - the *options* for the current request. - - Any other return value will be ignored. - -If *options* is a function it will be used as the *before* option. - -If *options* is a string it will be used as the *swaggerRoot* option. - -Returns a Foxx router. - -**Examples** - -```js -// Serve the API docs for the current service -router.use('/docs', module.context.createDocumentationRouter()); - -// -- or -- - -// Serve the API docs for the service the router is mounted in -router.use('/docs', module.context.createDocumentationRouter(function (req) { - return {mount: req.context.mount}; -})); - -// -- or -- - -// Serve the API docs only for users authenticated with ArangoDB -router.use('/docs', module.context.createDocumentationRouter(function (req, res) { - if (req.suffix === 'swagger.json' && !req.arangoUser) { - res.throw(401, 'Not authenticated'); - } -})); -``` - -collection ----------- - -`module.context.collection(name): ArangoCollection | null` - -Passes the given name to *collectionName*, then looks up the collection with -the prefixed name. - -**Arguments** - -* **name**: `string` - - Unprefixed name of the service-specific collection. - -Returns a collection or `null` if no collection with the prefixed name exists. - -collectionName --------------- - -`module.context.collectionName(name): string` - -Prefixes the given name with the *collectionPrefix* for this service. - -**Arguments** - -* **name**: `string` - - Unprefixed name of the service-specific collection. - -Returns the prefixed name. 
- -**Examples** - -```js -module.context.mount === '/my-foxx' -module.context.collectionName('doodads') === 'my_foxx_doodads' -``` - -file ----- - -`module.context.file(name, [encoding]): Buffer | string` - -Passes the given name to *fileName*, then loads the file with the resulting name. - -**Arguments** - -* **name**: `string` - - Name of the file to load, relative to the current service. - -* **encoding**: `string` (optional) - - Encoding of the file, e.g. `utf-8`. If omitted the file will be loaded as a - raw buffer instead of a string. - -Returns the file's contents. - -fileName --------- - -`module.context.fileName(name): string` - -Resolves the given file name relative to the current service. - -**Arguments** - -* **name**: `string` - - Name of the file, relative to the current service. - -Returns the absolute file path. - - - -use ---- - -`module.context.use([path], router): Endpoint` - -Mounts a given router on the service to expose the router's routes on the -service's mount point. - -**Arguments** - -* **path**: `string` (Default: `"/"`) - - Path to mount the router at, relative to the service's mount point. - -* **router**: `Router | Middleware` - - A router or middleware to mount. - -Returns an [Endpoint](Routers/Endpoints.md) for the given router or middleware. - -**Note**: Mounting services at run time (e.g. within request handlers or -queued jobs) is not supported. diff --git a/Documentation/Books/Manual/Foxx/Reference/Manifest.md b/Documentation/Books/Manual/Foxx/Reference/Manifest.md deleted file mode 100644 index 73528dda851b..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Manifest.md +++ /dev/null @@ -1,308 +0,0 @@ -Service manifest -================ - -Every service comes with a `manifest.json` file providing metadata. Typically a -manifest should at least specify the version of ArangoDB the service supports and -the `main` JavaScript file which Foxx will use as the entrypoint to your service: - -```json -{ - "engines": { - "arangodb": "^3.4.0" - }, - "main": "index.js" -} -``` - -Tooling integration -------------------- - -If you are using an IDE or editor that supports JSON schema for code intelligence -or validation, you can use the public Foxx manifest schema -[available at the third-party JSON Schema Store](http://json.schemastore.org/foxx-manifest) -by adding a `$schema` field to your `manifest.json` file: - -```json -{ - "$schema": "http://json.schemastore.org/foxx-manifest" -} -``` - -### Visual Studio Code - -In [Visual Studio Code](https://code.visualstudio.com) you can also enable the -Foxx manifest schema for all `manifest.json` files by adding the following to your -[user or workspace settings](https://code.visualstudio.com/docs/getstarted/settings): - -```json -{ - "json.schemas": [ - { - "fileMatch": [ - "manifest.json" - ], - "url": "http://json.schemastore.org/foxx-manifest" - } - ] -} -``` - -Structure ---------- - -The following fields are allowed in manifests: - -- **$schema**: `"http://json.schemastore.org/foxx-manifest"` (optional) - - The JSON schema. See above. - -- **configuration**: `Object` (optional) - - An object defining the [configuration options](Configuration.md) this service requires. 
- - - -- **defaultDocument**: `string` (optional) - - If specified, the `/` (root) route of the service will automatically redirect - to the given relative path, e.g.: - - ```json - "defaultDocument": "index.html" - ``` - - This would have the same effect as creating the following route in JavaScript: - - ```js - const createRouter = require("@arangodb/foxx/router"); - const indexRouter = createRouter(); - indexRouter.all("/", function(req, res) { - res.redirect("index.html"); - }); - module.context.use(indexRouter); - ``` - - **Note**: As of 3.0.0 this field can safely be omitted; the value no longer - defaults to `"index.html"`. - -- **dependencies**: `Object` (optional) - - An object mapping local aliases to dependency definitions. - Each entry can be a dependency name and version range in the format - `name:version` or an object with the following properties: - - - **name**: `string` (Default: `"*"`) - - Name of the dependency. - - - **version**: `string` (Default: `"*"`) - - Version range of the dependency. - - - **description**: `string` (optional) - - Human-readable description of the dependency or how the dependency is used. - - - **required**: `boolean` (Default: `true`) - - Whether the service requires the dependency to be assigned in order to function. - If a required dependency is not assigned, the service will marked as - inoperable until a service mount point has been assigned for the dependency. - - - **multiple**: `boolean` (Default: `false`) - - Whether the dependency can be specified multiple times. If a dependency is - marked as `multiple`, the value of the local alias will be an array of all - services assigned for the dependency. - - See [the dependencies guide](../Guides/Dependencies.md) for more information. - -- **engines**: `Object` (optional) - - An object indicating the [semantic version ranges](http://semver.org) of - ArangoDB (or compatible environments) the service will be compatible with, e.g.: - - ```json - "engines": { - "arangodb": "^3.0.0" - } - ``` - - This should correctly indicate the minimum version of ArangoDB the service - has been tested against. Foxx maintains a strict semantic versioning policy - as of ArangoDB 3.0.0 so it is generally safe to use semver ranges - (e.g. `^3.0.0` to match any version greater or equal to `3.0.0` and below - `4.0.0`) for maximum compatibility. - -- **files**: `Object` (optional) - - An object defining file assets served by this service. - - Each entry can represent either a single file or a directory. - When serving entire directories, the key acts as a prefix and requests to - that prefix will be resolved within the given directory: - - - **path**: `string` - - The relative path of the file or folder within the service. - - - **type**: `string` (optional) - - The MIME content type of the file. Defaults to an intelligent guess based - on the filename's extension. - - - **gzip**: `boolean` (Default: `false`) - - If set to `true` the file will be served with gzip-encoding if supported - by the client. This can be useful when serving text files like client-side - JavaScript, CSS or HTML. - - If a string is provided instead of an object, it will be interpreted as the _path_ option. 
- - Example serving the `public` folder at `/static` and the `favicon.ico` at `/favicon.ico`: - - ```json - "files": { - "favicon.ico": { - "path": "public/favicon.ico", - "gzip": false - }, - "static": "public" - } - ``` - -- **lib**: `string` (Default: `"."`) - - The relative path to the Foxx JavaScript files in the service, e.g.: - - ```json - "lib": "lib" - ``` - - This would result in the main entry point (see below) and other JavaScript - paths being resolved as relative to the `lib` folder inside the service folder. - -- **main**: `string` (optional) - - The relative path to the main entry point of this service - (relative to _lib_, see above), e.g.: - - ```json - "main": "index.js" - ``` - - This would result in Foxx loading and executing the file `index.js` when - the service is mounted or started. - - **Note**: while it is technically possible to omit this field, you will - likely want to provide an entry point to your service as this is the only - way to expose HTTP routes or export a JavaScript API. - -- **provides**: `Object` (optional) - - An object mapping dependency names to version ranges of that dependency - provided by this service. See [the dependencies guide](../Guides/Dependencies.md) - for more information. - -- **scripts**: `Object` (optional) - - An object defining [named scripts](../Guides/Scripts.md) provided by this - service, which can either be used directly or as queued jobs by other services. - -- **tests**: `string` or `Array` (optional) - - One or more patterns to match the paths of test files, e.g.: - - ```json - "tests": [ - "**/test_*.js", - "**/*_test.js" - ] - ``` - - These patterns can be either relative file paths or "globstar" patterns where - - - `*` matches zero or more characters in a filename - - `**` matches zero or more nested directories. - -Additionally manifests can provide the following metadata: - -- **author**: `string` (optional) - - The full name of the author of the service (i.e. you). - This will be shown in the web interface. - -- **contributors**: `Array` (optional) - - A list of names of people that have contributed to the development of the - service in some way. This will be shown in the web interface. - -- **description**: `string` (optional) - - A human-readable description of the service. - This will be shown in the web interface. - -- **keywords**: `Array` (optional) - - A list of keywords that help categorize this service. - This is used by the Foxx Store installers to organize services. - -- **license**: `string` (optional) - - A string identifying the license under which the service is published, ideally - in the form of an [SPDX license identifier](https://spdx.org/licenses). - This will be shown in the web interface. - -- **name**: `string` (optional) - - The name of the Foxx service. Allowed characters are A-Z, 0-9, the ASCII - hyphen (`-`) and underscore (`_`) characters. The name must not start with - a number. This will be shown in the web interface. - -- **thumbnail**: `string` (optional) - - The filename of a thumbnail that will be used alongside the service in the - web interface. This should be a JPEG or PNG image that looks good at sizes - 50x50 and 160x160. - -- **version**: `string` (optional) - - The version number of the Foxx service. The version number must follow the - [semantic versioning format](http://semver.org). - This will be shown in the web interface. 
- -**Examples** - -```json -{ - "name": "example-foxx-service", - "version": "3.0.0-dev", - "license": "MIT", - "description": "An example service with a relatively full-featured manifest.", - "thumbnail": "foxx-icon.png", - "keywords": ["demo", "service"], - "author": "ArangoDB GmbH", - "contributors": [ - "Alan Plum " - ], - - "lib": "dist", - "main": "entry.js", - "defaultDocument": "welcome.html", - "engines": { - "arangodb": "^3.0.0" - }, - - "files": { - "welcome.html": "assets/index.html", - "hello.jpg": "assets/hello.jpg" - "world.jpg": { - "path": "assets/world.jpg", - "type": "image/jpeg", - "gzip": false - } - }, - - "tests": "dist/**.spec.js" -} -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/Auth.md b/Documentation/Books/Manual/Foxx/Reference/Modules/Auth.md deleted file mode 100644 index 3a6d46cbd8c7..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/Auth.md +++ /dev/null @@ -1,109 +0,0 @@ -Authentication -============== - -`const createAuth = require('@arangodb/foxx/auth');` - -Authenticators allow implementing basic password mechanism using simple -built-in hashing functions. - -Creating an authenticator -------------------------- - -`createAuth([options]): Authenticator` - -Creates an authenticator. - -**Arguments** - -* **options**: `Object` (optional) - - An object with the following properties: - - * **method**: `string` (Default: `"sha256"`) - - The hashing algorithm to use to create password hashes. - The authenticator will be able to verify passwords against hashes using - any supported hashing algorithm. This only affects new hashes created - by the authenticator. - - Supported values: - - * `"md5"` - * `"sha1"` - * `"sha224"` - * `"sha256"` - * `"sha384"` - * `"sha512"` - * `"pbkdf2"` - - **Note**: PBKDF2 is more secure but also takes considerably more resources - to compute, which will impact ArangoDB performance, especially when - verifying/hashing multiple passwords at a time. If you need a secure - authentication mechanism consider performing authentication outside the database - or using a third-party identity provider that [supports OAuth 1.0a](OAuth1.md) - or [OAuth 2.0](OAuth2.md). - - * **saltLength**: `number` (Default: `16`) - - Length of the salts that will be generated for password hashes. - - Also used as the key length for PBKDF2. - - * **workFactor**: `number` (Default: `1`) - - Can be used to scale the number of iterations for PBKDF2 hashes, - lower means faster, higher means slower. - - Note that when using PBKDF2 the number of iterations will be automatically - scaled based on the number of milliseconds elapsed since 1 January 2000, - the work factor can be used to adjust the result further as needed. - -Returns an authenticator. - -Creating authentication data objects ------------------------------------- - -`auth.create(password): AuthData` - -Creates an authentication data object for the given password with the -following properties: - -* **method**: `string` - - The method used to generate the hash. - -* **salt**: `string` - - A random salt used to generate this hash. - -* **hash**: `string` - - The hash string itself. - -**Arguments** - -* **password**: `string` - - A password to hash. - -Returns the authentication data object. - -Validating passwords against authentication data objects --------------------------------------------------------- - -`auth.verify([hash, [password]]): boolean` - -Verifies the given password against the given hash using a constant time -string comparison. 
- -**Arguments** - -* **hash**: `AuthData` (optional) - - A authentication data object generated with the *create* method. - -* **password**: `string` (optional) - - A password to verify against the hash. - -Returns `true` if the hash matches the given password. Returns `false` otherwise. diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/GraphQL.md b/Documentation/Books/Manual/Foxx/Reference/Modules/GraphQL.md deleted file mode 100644 index 41b7cd407181..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/GraphQL.md +++ /dev/null @@ -1,169 +0,0 @@ -GraphQL integration -=================== - -`const createGraphQLRouter = require('@arangodb/foxx/graphql');` - -Foxx bundles version 0.6 of the -[`graphql-sync` module](https://github.com/arangodb/graphql-sync), which is a -synchronous wrapper for the official JavaScript GraphQL reference -implementation, to allow writing GraphQL schemas directly inside Foxx. - -Additionally the `@arangodb/foxx/graphql` lets you create routers for serving -GraphQL requests, which closely mimicks the behaviour of the -[`express-graphql` module](https://github.com/graphql/express-graphql). - -For more information on `graphql-sync` see the -[`graphql-js` API reference](http://graphql.org/docs/api-reference-graphql/) -(note that `graphql-sync` never wraps results in promises). - -For an example of a GraphQL schema in Foxx that resolves fields using the -database see [the GraphQL example service](https://github.com/arangodb-foxx/demo-graphql) -(also available from the Foxx store). - -**Examples** - -```js -const graphql = require('graphql-sync'); -const graphqlSchema = new graphql.GraphQLSchema({ - // ... -}); - -// Mounting a graphql endpoint directly in a service: -module.context.use('/graphql', createGraphQLRouter({ - schema: graphqlSchema, - graphiql: true -})); - -// Or at the service's root URL: -module.context.use(createGraphQLRouter({ - schema: graphqlSchema, - graphiql: true -})); - -// Or inside an existing router: -router.get('/hello', function (req, res) { - res.write('Hello world!'); -}); -router.use('/graphql', createGraphQLRouter({ - schema: graphqlSchema, - graphiql: true -})); -``` - -**Note**: ArangoDB aims for stability which means bundled dependencies will -generally not be updated as quickly as their maintainers make updates -available on GitHub or NPM. Starting with ArangoDB 3.2, if you want to use a -newer version than the one bundled with your target version of ArangoDB, you -can provide your own version of the library by passing it via the `graphql` option: - -```js -const graphql = require('graphql-sync'); -const graphqlSchema = new graphql.Schema({ - //... -}); -module.context.use(createGraphQLRouter({ - schema: graphqlSchema, - graphiql: true, - graphql: graphql -})) -``` - -Starting with `graphql` 0.12 you can also use -[the official graphql library](https://github.com/graphql/graphql-js) if you -include it in the `node_modules` folder of your service bundle: - -```js -const graphql = require('graphql'); // 0.12 or later -const graphqlSchema = new graphql.Schema({ - //... -}); -module.context.use(createGraphQLRouter({ - schema: graphqlSchema, - graphiql: true, - graphql: graphql -})) -``` - -Creating a router ------------------ - -`createGraphQLRouter(options): Router` - -This returns a new router object with POST and GET routes for serving GraphQL requests. 
- -**Arguments** - -* **options**: `object` - - An object with any of the following properties: - - * **schema**: `GraphQLSchema` - - A GraphQL Schema object from `graphql-sync`. - - * **context**: `any` (optional) - - The GraphQL context that will be passed to the `graphql()` function from - `graphql-sync` to handle GraphQL queries. - - * **rootValue**: `object` (optional) - - The GraphQL root value that will be passed to the `graphql()` function - from `graphql-sync` to handle GraphQL queries. - - * **pretty**: `boolean` (Default: `false`) - - If `true`, JSON responses will be pretty-printed. - - * **formatError**: `Function` (optional) - - A function that will be used to format errors produced by `graphql-sync`. - If omitted, the `formatError` function from `graphql-sync` will be used instead. - - * **validationRules**: `Array` (optional) - - Additional validation rules queries must satisfy in addition to those - defined in the GraphQL spec. - - * **graphiql**: `boolean` (Default: `false`) - - If `true`, the [GraphiQL](https://github.com/graphql/graphiql) explorer - will be served when loaded directly from a browser. - - * **graphql**: `object` (optional) - - If you need to use your own copy of the `graphql-sync` module instead of - the one bundled with ArangoDB, here you can pass it in directly. - -If a GraphQL Schema object is passed instead of an options object it will be -interpreted as the *schema* option. - -Generated routes ----------------- - -The router handles GET and POST requests to its root path and accepts the -following parameters, which can be provided either as query parameters or -as the POST request body: - -* **query**: `string` - - A GraphQL query that will be executed. - -* **variables**: `object | string` (optional) - - An object or a string containing a JSON object with runtime values to use - for any GraphQL query variables. - -* **operationName**: `string` (optional) - - If the provided `query` contains multiple named operations, this specifies - which operation should be executed. - -* **raw**: `boolean` (Default: `false`) - - Forces a JSON response even if *graphiql* is enabled and the request was - made using a browser. - -The POST request body can be provided as JSON or as query string using -`application/x-www-form-urlencoded`. A request body passed as -`application/graphql` will be interpreted as the `query` parameter. diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth1.md b/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth1.md deleted file mode 100644 index 40ab5097fb32..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth1.md +++ /dev/null @@ -1,433 +0,0 @@ -OAuth 1.0a -========== - -`const createOAuth1Client = require('@arangodb/foxx/oauth1');` - -The OAuth1 module provides abstractions over OAuth 1.0a providers like -Twitter, XING and Tumblr. 
- -**Examples** - -```js -const router = createRouter(); -const oauth1 = createOAuth1Client({ - // We'll use Twitter for this example - requestTokenEndpoint: 'https://api.twitter.com/oauth/request_token', - authEndpoint: 'https://api.twitter.com/oauth/authorize', - accessTokenEndpoint: 'https://api.twitter.com/oauth/access_token', - activeUserEndpoint: 'https://api.twitter.com/1.1/account/verify_credentials.json', - clientId: 'keyboardcat', - clientSecret: 'keyboardcat' -}); - -module.context.use('/oauth1', router); - -// See the user management example for setting up the -// sessions and users objects used in this example -router.use(sessions); - -router.post('/auth', function (req, res) { - const url = req.reverse('oauth1_callback'); - const oauth_callback = req.makeAbsolute(url); - const requestToken = oauth1.fetchRequestToken(oauth_callback); - if (requestToken.oauth_callback_confirmed !== 'true') { - res.throw(500, 'Could not fetch OAuth request token'); - } - // Set request token cookie for five minutes - res.cookie('oauth1_request_token', requestToken.oauth_token, {ttl: 60 * 5}); - // Redirect to the provider's authorization URL - res.redirect(303, oauth1.getAuthUrl(requestToken.oauth_token)); -}); - -router.get('/auth', function (req, res) { - // Make sure CSRF cookie matches the URL - const expectedToken = req.cookie('oauth1_request_token'); - if (!expectedToken || req.queryParams.oauth_token !== expectedToken) { - res.throw(400, 'CSRF mismatch.'); - } - const authData = oauth1.exchangeRequestToken( - req.queryParams.oauth_token, - req.queryParams.oauth_verifier - ); - const twitterToken = authData.oauth_token; - const twitterSecret = authData.oauth_token_secret; - // Fetch the active user's profile info - const profile = oauth1.fetchActiveUser(twitterToken, twitterSecret); - const twitterId = profile.screen_name; - // Try to find an existing user with the user ID - // (this requires the users collection) - let user = users.firstExample({twitterId}); - if (user) { - // Update the twitterToken if it has changed - if ( - user.twitterToken !== twitterToken || - user.twitterSecret !== twitterSecret - ) { - users.update(user, {twitterToken, twitterSecret}); - } - } else { - // Create a new user document - user = { - username: `twitter:${twitterId}`, - twitterId, - twitterToken - } - const meta = users.save(user); - Object.assign(user, meta); - } - // Log the user in (this requires the session middleware) - req.session.uid = user._key; - req.session.twitterToken = authData.twitterToken; - req.session.twitterSecret = authData.twitterSecret; - req.sessionStorage.save(req.session); - // Redirect to the default route - res.redirect(303, req.makeAbsolute('/')); -}, 'oauth1_callback') -.queryParam('oauth_token', joi.string().optional()) -.queryParam('oauth_verifier', joi.string().optional()); -``` - -Creating an OAuth1.0a client ----------------------------- - -`createOAuth1Client(options): OAuth1Client` - -Creates an OAuth1.0a client. - -**Arguments** - -* **options**: `Object` - - An object with the following properties: - - * **requestTokenEndpoint**: `string` - - The fully-qualified URL of the provider's - [Temporary Credentials Request endpoint](https://tools.ietf.org/html/rfc5849#section-2.1). - This URL is used to fetch the unauthenticated temporary credentials that - will be used to generate the authorization redirect for the user. 
- - * **authEndpoint**: `string` - - The fully-qualified URL of the provider's - [Resource Owner Authorization endpoint](https://tools.ietf.org/html/rfc5849#section-2.2). - This is the URL the user will be redirected to in order to authorize the - OAuth consumer (i.e. your service). - - * **accessTokenEndpoint**: `string` - - The fully-qualified URL of the provider's - [Token Request endpoint](https://tools.ietf.org/html/rfc5849#section-2.3). - This URL is used to exchange the authenticated temporary credentials - received from the authorization redirect for the actual token credentials - that can be used to make requests to the API server. - - * **activeUserEndpoint**: `string` (optional) - - The fully-qualified URL of the provider's endpoint for fetching details - about the current user. - - * **clientId**: `string` - - The application's *Client ID* (or *Consumer Key*) for the provider. - - * **clientSecret**: `string` - - The application's *Client Secret* (or *Consumer Secret*) for the provider. - - * **signatureMethod**: `string` (Default: `"HMAC-SHA1"`) - - The cryptographic method that will be used to sign OAuth 1.0a requests. - Only `"HMAC-SHA1-"` and `"PLAINTEXT"` are supported at this time. - - Note that many providers may not implement `"PLAINTEXT"` as it exposes the - *Client Secret* and `oauth_token_secret` instead of generating a signature. - -Returns an OAuth 1.0a client for the given provider. - -### Setting up OAuth 1.0a for Twitter - -If you want to use Twitter as the OAuth 1.0a provider, use the following options: - -* *requestTokenEndpoint*: `https://api.twitter.com/oauth/request_token` -* *authEndpoint*: `https://api.twitter.com/oauth/authorize` -* *accessTokenEndpoint*: `https://api.twitter.com/oauth/access_token` -* *activeUserEndpoint*: `https://api.twitter.com/1.1/account/verify_credentials.json` - -You also need to obtain a client ID and client secret from Twitter: - -1. Create a regular account at [Twitter](https://www.twitter.com) or use an - existing account you own. -2. Visit the [Twitter Application Management](https://apps.twitter.com) - dashboard and sign in with your Twitter account. -3. Click on *Create New App* and follow the instructions provided. - The *Callback URL* should match your *oauth_callback* later. You may be - prompted to add a mobile phone number to your account and verify it. -4. Open the *Keys and Access Tones* tab, then note down the *Consumer Key* - and *Consumer Secret*. -5. Set the option *clientId* to the *Consumer Key* and the option - *clientSecret* to the *Consumer Secret*. - -Note that if you only need read-only access to public information, you can also -[use the *clientId* and *clientSecret* directly](https://dev.twitter.com/oauth/application-only) -without OAuth 1.0a. - -See [Twitter REST API Reference Documentation](https://dev.twitter.com/rest/reference). - -### Setting up OAuth 1.0a for XING - -If you want to use XING as the OAuth 1.0a provider, use the following options: - -* *requestTokenEndpoint*: `https://api.xing.com/v1/request_token` -* *authEndpoint*: `https://api.xing.com/v1/authorize` -* *accessTokenEndpoint*: `https://api.xing.com/v1/access_token` -* *activeUserEndpoint*: `https://api.xing.com/v1/users/me` - -You also need to obtain a client ID and client secret from XING: - -1. Create a regular account at [XING](https://xing.com) or use an existing - account you own. -2. Visit the [XING Developer](https://dev.xing.com) page and sign in with - your XING account. -3. 
Click on *Create app* and note down the *Consumer key* and *Consumer secret*. -4. Set the option *clientId* to the *Consumer key* and the option - *clientSecret* to the *Consumer secret*. - -See [XING Developer Documentation](https://dev.xing.com/docs). - -### Setting up OAuth 1.0a for Tumblr - -If you want to use Tumblr as the OAuth 1.0a provider, use the following options: - -* *requestTokenEndpoint*: `https://www.tumblr.com/oauth/request_token` -* *authEndpoint*: `https://www.tumblr.com/oauth/authorize` -* *accessTokenEndpoint*: `https://www.tumblr.com/oauth/access_token` -* *activeUserEndpoint*: `https://api.tumblr.com/v2/user/info` - -You also need to obtain a client ID and client secret from Tumblr: - -1. Create a regular account at [Tumblr](https://www.tumblr.com) or use an - existing account you own. -2. Visit the [Tumblr Applications](https://www.tumblr.com/oauth/apps) dashboard. -3. Click on *Register application*, then follow the instructions provided. - The *Default callback URL* should match your *oauth_callback* later. -4. Note down the *OAuth Consumer Key* and *Secret Key*. The secret may be - hidden by default. -5. Set the option *clientId* to the *OAuth Consumer Key* and the option - *clientSecret* to the *Secret Key*. - -See [Tumblr API Documentation](https://www.tumblr.com/docs/en/api/v2). - -Fetch an unauthenticated request token --------------------------------------- - -`oauth1.fetchRequestToken(oauth_callback, opts)` - -Fetches an `oauth_token` that can be used to create an authorization URL that -redirects to the given `oauth_callback` on confirmation. - -Performs a *POST* response to the *requestTokenEndpoint*. - -Throws an exception if the remote server responds with an empty response body. - -**Arguments** - -* **oauth_callback**: `string` - - The fully-qualified URL of your application's OAuth 1.0a callback. - -* **opts**: `Object` (optional) - - An object with additional query parameters to include in the request. - - See [RFC 5849](https://tools.ietf.org/html/rfc5849). - -Returns the parsed response object. - -Get the authorization URL -------------------------- - -`oauth1.getAuthUrl(oauth_token, opts): string` - -Generates the authorization URL for the authorization endpoint. - -**Arguments** - -* **oauth_token**: `string` - - The `oauth_token` previously returned by `fetchRequestToken`. - -* **opts**: (optional) - - An object with additional query parameters to add to the URL. - - See [RFC 5849](https://tools.ietf.org/html/rfc5849). - -Returns a fully-qualified URL for the authorization endpoint of the provider -by appending the `oauth_token` and any additional arguments from *opts* to -the *authEndpoint*. - -**Examples** - -```js -const requestToken = oauth1.fetchRequestToken(oauth_callback); -if (requestToken.oauth_callback_confirmed !== 'true') { - throw new Error('Provider could not confirm OAuth 1.0 callback'); -} -const authUrl = oauth1.getAuthUrl(requestToken.oauth_token); -``` - -Exchange an authenticated request token for an access token ------------------------------------------------------------ - -`oauth1.exchangeRequestToken(oauth_token, oauth_verifier, opts)` - -Takes a pair of authenticated temporary credentials passed to the callback URL -by the provider and exchanges it for an `oauth_token` and `oauth_token_secret` -than can be used to perform authenticated requests to the OAuth 1.0a provider. - -Performs a *POST* response to the *accessTokenEndpoint*. - -Throws an exception if the remote server responds with an empty response body. 
- -**Arguments** - -* **oauth_token**: `string` - - The `oauth_token` passed to the callback URL by the provider. - -* **oauth_verifier**: `string` - - The `oauth_verifier` passed to the callback URL by the provider. - -* **opts**: `Object` (optional) - - An object with additional query parameters to include in the request. - - See [RFC 5849](https://tools.ietf.org/html/rfc5849). - -Returns the parsed response object. - -Fetch the active user ---------------------- - -`oauth1.fetchActiveUser(oauth_token, oauth_token_secret, opts): Object` - -Fetches details of the active user. - -Performs a *GET* response to the *activeUserEndpoint*. - -Throws an exception if the remote server responds with an empty response body. - -Returns `null` if the *activeUserEndpoint* is not configured. - -**Arguments** - -* **oauth_token**: `string` - - An OAuth 1.0a access token as returned by *exchangeRequestToken*. - -* **oauth_token_secret**: `string` - - An OAuth 1.0a access token secret as returned by *exchangeRequestToken*. - -* **opts**: `Object` (optional) - - An object with additional query parameters to include in the request. - - See [RFC 5849](https://tools.ietf.org/html/rfc5849). - -Returns the parsed response object. - -**Examples** - -```js -const authData = oauth1.exchangeRequestToken(oauth_token, oauth_verifier); -const userData = oauth1.fetchActiveUser(authData.oauth_token, authData.oauth_token_secret); -``` - -Create an authenticated request object --------------------------------------- - -`oauth1.createSignedRequest(method, url, parameters, oauth_token, oauth_token_secret)` - -Creates a request object that can be used to perform a request to the OAuth 1.0a -provider with the provided token credentials. - -**Arguments** - -* **method**: `string` - - HTTP method the request will use, e.g. `"POST"`. - -* **url**: `string` - - The fully-qualified URL of the provider the request will be performed against. - - The URL may optionally contain any number of query parameters. - -* **parameters**: `string | Object | null` - - An additional object or query string containing query parameters or body - parameters that will be part of the signed request. - -* **oauth_token**: `string` - - An OAuth 1.0a access token as returned by *exchangeRequestToken*. - -* **oauth_token_secret**: `string` - - An OAuth 1.0a access token secret as returned by *exchangeRequestToken*. - -Returns an object with three properties: - - * **url**: The normalized URL without any query parameters. - - * **qs**: A normalized query string containing all *parameters* and query parameters. - - * **headers**: An object containing the following properties: - - * **accept**: The string `"application/json"`. - - * **authorization**: An OAuth authorization header containing all OAuth - parameters and the request signature. 
- -**Examples** - -Fetch a list of tweets mentioning `@arangodb`: - -```js -const request = require('@arangodb/request'); -const req = oauth1.createSignedRequest( - 'GET', - 'https://api.twitter.com/1.1/search/tweets.json', - {q: '@arangodb'}, - authData.oauth_token, - authData.oauth_token_secret -); -const res = request(req); -console.log(res.json.statuses); -``` - -Signing a more complex request: - -```js -const url = 'https://api.example.com/v1/timeline?visible=public'; -const params = {hello: 'world', longcat: 'is long'}; -const req = oauth1.createSignedRequest( - 'POST', - url, // URL includes a query parameter that will be signed - params, // Request body needs to be signed too - authData.oauth_token, - authData.oauth_token_secret -); -const res = request.post(url, { - form: params, - headers: { - accept: 'application/x-www-form-urlencoded', - // Authorization header includes the signature - authorization: req.headers.authorization - } -}); -console.log(res.json); -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth2.md b/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth2.md deleted file mode 100644 index c8e2865c5bf6..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/OAuth2.md +++ /dev/null @@ -1,291 +0,0 @@ -OAuth 2.0 -========= - -`const createOAuth2Client = require('@arangodb/foxx/oauth2');` - -The OAuth2 module provides abstractions over OAuth 2.0 providers like -Facebook, GitHub and Google. - -**Examples** - -```js -const crypto = require('@arangodb/crypto'); -const router = createRouter(); -const oauth2 = createOAuth2Client({ - // We'll use Facebook for this example - authEndpoint: 'https://www.facebook.com/dialog/oauth', - tokenEndpoint: 'https://graph.facebook.com/oauth/access_token', - activeUserEndpoint: 'https://graph.facebook.com/v2.0/me', - clientId: 'keyboardcat', - clientSecret: 'keyboardcat' -}); - -module.context.use('/oauth2', router); - -// See the user management example for setting up the -// sessions and users objects used in this example -router.use(sessions); - -router.post('/auth', function (req, res) { - const csrfToken = crypto.genRandomAlphaNumbers(32); - const url = req.reverse('oauth2_callback', {csrfToken}); - const redirect_uri = req.makeAbsolute(url); - // Set CSRF cookie for five minutes - res.cookie('oauth2_csrf_token', csrfToken, {ttl: 60 * 5}); - // Redirect to the provider's authorization URL - res.redirect(303, oauth2.getAuthUrl(redirect_uri)); -}); - -router.get('/auth', function (req, res) { - // Some providers pass errors as query parameter - if (req.queryParams.error) { - res.throw(500, `Provider error: ${req.queryParams.error}`) - } - // Make sure CSRF cookie matches the URL - const expectedToken = req.cookie('oauth2_csrf_token'); - if (!expectedToken || req.queryParams.csrfToken !== expectedToken) { - res.throw(400, 'CSRF mismatch.'); - } - // Make sure the URL contains a grant token - if (!req.queryParams.code) { - res.throw(400, 'Provider did not pass grant token.'); - } - // Reconstruct the redirect_uri used for the grant token - const url = req.reverse('oauth2_callback'); - const redirect_uri = req.makeAbsolute(url); - // Fetch an access token from the provider - const authData = oauth2.exchangeGrantToken( - req.queryParams.code, - redirect_uri - ); - const facebookToken = authData.access_token; - // Fetch the active user's profile info - const profile = oauth2.fetchActiveUser(facebookToken); - const facebookId = profile.id; - // Try to find an existing user with the user ID - // (this 
requires the users collection) - let user = users.firstExample({facebookId}); - if (user) { - // Update the facebookToken if it has changed - if (user.facebookToken !== facebookToken) { - users.update(user, {facebookToken}); - } - } else { - // Create a new user document - user = { - username: `fb:${facebookId}`, - facebookId, - facebookToken - } - const meta = users.save(user); - Object.assign(user, meta); - } - // Log the user in (this requires the session middleware) - req.session.uid = user._key; - req.session.facebookToken = authData.facebookToken; - req.sessionStorage.save(req.session); - // Redirect to the default route - res.redirect(303, req.makeAbsolute('/')); -}, 'oauth2_callback') -.queryParam('error', joi.string().optional()) -.queryParam('csrfToken', joi.string().optional()) -.queryParam('code', joi.string().optional()); -``` - -Creating an OAuth 2.0 client -------------------------- - -`createOAuth2Client(options): OAuth2Client` - -Creates an OAuth 2.0 client. - -**Arguments** - -* **options**: `Object` - - An object with the following properties: - - * **authEndpoint**: `string` - - The fully-qualified URL of the provider's - [authorization endpoint](http://tools.ietf.org/html/rfc6749#section-3.1). - - * **tokenEndpoint**: `string` - - The fully-qualified URL of the provider's - [token endpoint](http://tools.ietf.org/html/rfc6749#section-3.2). - - * **refreshEndpoint**: `string` (optional) - - The fully-qualified URL of the provider's - [refresh token endpoint](http://tools.ietf.org/html/rfc6749#section-6). - - * **activeUserEndpoint**: `string` (optional) - - The fully-qualified URL of the provider's endpoint for fetching - details about the current user. - - * **clientId**: `string` - - The application's *Client ID* (or *App ID*) for the provider. - - * **clientSecret**: `string` - - The application's *Client Secret* (or *App Secret*) for the provider. - -Returns an OAuth 2.0 client for the given provider. - -### Setting up OAuth 2.0 for Facebook - -If you want to use Facebook as the OAuth 2.0 provider, use the following options: - -* *authEndpoint*: `https://www.facebook.com/dialog/oauth` -* *tokenEndpoint*: `https://graph.facebook.com/oauth/access_token` -* *activeUserEndpoint*: `https://graph.facebook.com/v2.0/me` - -You also need to obtain a client ID and client secret from Facebook: - -1. Create a regular account at [Facebook](https://www.facebook.com) or use an - existing account you own. -2. Visit the [Facebook Developers](https://developers.facebook.com) page. -3. Click on *Apps* in the menu, then select *Register as a Developer* - (the only option) and follow the instructions provided. You may need to - verify your account by phone. -4. Click on *Apps* in the menu, then select *Create a New App* and follow - the instructions provided. -5. Open the app dashboard, then note down the *App ID* and *App Secret*. - The secret may be hidden by default. -6. Click on *Settings*, then *Advanced* and enter one or more - *Valid OAuth redirect URIs*. At least one of them must match your - *redirect_uri* later. Don't forget to save your changes. -7. Set the option *clientId* to the *App ID* and the option *clientSecret* - to the *App Secret*. 
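Once you have obtained the *App ID* and *App Secret*, wiring them into the client could look like the following sketch. Reading the values from the service configuration is an assumption here; the configuration keys `facebookClientId` and `facebookClientSecret` are hypothetical and would need to be declared in the service manifest:

```js
'use strict';
const createOAuth2Client = require('@arangodb/foxx/oauth2');

// Endpoints as listed above; credentials come from the service configuration.
const oauth2 = createOAuth2Client({
  authEndpoint: 'https://www.facebook.com/dialog/oauth',
  tokenEndpoint: 'https://graph.facebook.com/oauth/access_token',
  activeUserEndpoint: 'https://graph.facebook.com/v2.0/me',
  clientId: module.context.configuration.facebookClientId,
  clientSecret: module.context.configuration.facebookClientSecret
});
```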
- -### Setting up OAuth 2.0 for GitHub - -If you want to use GitHub as the OAuth 2.0 provider, use the following options: - -* *authEndpoint*: `https://github.com/login/oauth/authorize?scope=user` -* *tokenEndpoint*: `https://github.com/login/oauth/access_token` -* *activeUserEndpoint*: `https://api.github.com/user` - -You also need to obtain a client ID and client secret from GitHub: - -1. Create a regular account at [GitHub](https://github.com) or use an - existing account you own. -2. Go to [Account Settings > Applications > Register new application](https://github.com/settings/applications/new). -3. Provide an *authorization callback URL*. This must match your - *redirect_uri* later. -4. Fill in the other required details and follow the instructions provided. -5. Open the application page, then note down the *Client ID* and *Client Secret*. -6. Set the option *clientId* to the *Client ID* and the option *clientSecret* - to the *Client Secret*. - -### Setting up OAuth 2.0 for Google - -If you want to use Google as the OAuth 2.0 provider, use the following options: - -* *authEndpoint*: `https://accounts.google.com/o/oauth2/auth?access_type=offline&scope=profile` -* *tokenEndpoint*: `https://accounts.google.com/o/oauth2/token` -* *activeUserEndpoint*: `https://www.googleapis.com/plus/v1/people/me` - -You also need to obtain a client ID and client secret from Google: - -1. Create a regular account at [Google](https://www.google.com) or use an - existing account you own. -2. Visit the [Google Developers Console](https://console.developers.google.com). -3. Click on *Create Project*, then follow the instructions provided. -4. When your project is ready, open the project dashboard, then click on - *Enable an API*. -5. Enable the *Google+ API* to allow your app to distinguish between different users. -6. Open the *Credentials* page and click *Create new Client ID*, then follow - the instructions provided. At least one *Authorized Redirect URI* must match - your *redirect_uri* later. At least one *Authorized JavaScript Origin* must - match your app's fully-qualified domain. -7. When the Client ID is ready, note down the *Client ID* and *Client secret*. -8. Set the option *clientId* to the *Client ID* and the option *clientSecret* - to the *Client secret*. - -Get the authorization URL -------------------------- - -`oauth2.getAuthUrl(redirect_uri, args): string` - -Generates the authorization URL for the authorization endpoint. - -**Arguments** - -* **redirect_uri**: `string` - - The fully-qualified URL of your application's OAuth 2.0 callback. - -* **args**: (optional) - - An object with any of the following properties: - - * **response_type**: `string` (Default: `"code"`) - - See [RFC 6749](http://tools.ietf.org/html/rfc6749). - -Returns a fully-qualified URL for the authorization endpoint of the provider -by appending the client ID and any additional arguments from *args* to the -*authEndpoint*. - -Exchange a grant code for an access token ------------------------------------------ - -`oauth2.exchangeGrantToken(code, redirect_uri)` - -Exchanges a grant code for an access token. - -Performs a *POST* response to the *tokenEndpoint*. - -Throws an exception if the remote server responds with an empty response body. - -**Arguments** - -* **code**: `string` - - A grant code returned by the provider's authorization endpoint. - -* **redirect_uri**: `string` - - The original callback URL with which the code was requested. 
- -* **args**: `Object` (optional) - - An object with any of the following properties: - - * **grant_type**: `string` (Default: `"authorization_code"`) - - See [RFC 6749](http://tools.ietf.org/html/rfc6749). - -Returns the parsed response object. - -Fetch the active user ---------------------- - -`oauth2.fetchActiveUser(access_token): Object` - -Fetches details of the active user. - -Performs a *GET* response to the *activeUserEndpoint*. - -Throws an exception if the remote server responds with an empty response body. - -Returns `null` if the *activeUserEndpoint* is not configured. - -**Arguments** - -* **access_token**: `string` - - An OAuth 2.0 access token as returned by *exchangeGrantToken*. - -Returns the parsed response object. - -**Examples** - -```js -const authData = oauth2.exchangeGrantToken(code, redirect_uri); -const userData = oauth2.fetchActiveUser(authData.access_token); -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/Queues.md b/Documentation/Books/Manual/Foxx/Reference/Modules/Queues.md deleted file mode 100644 index a9b8ef76a6cc..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/Queues.md +++ /dev/null @@ -1,461 +0,0 @@ -Foxx queues -=========== - -`const queues = require('@arangodb/foxx/queues')` - -Foxx allows defining job queues that let you perform slow or expensive actions -asynchronously. These queues can be used to send e-mails, call external APIs or -perform other actions that you do not want to perform directly or want to retry -on failure. - -Foxx queue jobs can be any [script](../../Guides/Scripts.md) named in the -[manifest](../Manifest.md) of a service in the same database. - -Please note that Foxx queues are database-specific. Queues and jobs are always -relative to the database in which they are created or accessed. - -For disabling the Foxx queues feature or adjusting the polling interval see the -[`foxx.queues` and `foxx.queues-poll-interval` options](../../../Programs/Arangod/Foxx.md). - -For the low-level functionality see the chapter on the -[task management module](../../../Appendix/JavaScriptModules/Tasks.md). - -Managing queues ---------------- - -### queues.create - -`queues.create(name, [maxWorkers]): Queue` - -Returns the queue for the given name. If the queue does not exist, a new queue -with the given name will be created. If a queue with the given name already exists -and maxWorkers is set, the queue's maximum number of workers will be updated. -The queue will be created in the current database. - -**Arguments** - -* **name**: `string` - - Name of the queue to create. - -* **maxWorkers**: `number` (Default: `1`) - - The maximum number of workers. - -**Examples** - -```js -// Create a queue with the default number of workers (i.e. one) -const queue1 = queues.create("my-queue"); -// Create a queue with a given number of workers -const queue2 = queues.create("another-queue", 2); -// Update the number of workers of an existing queue -const queue3 = queues.create("my-queue", 10); -// queue1 and queue3 refer to the same queue -assertEqual(queue1, queue3); -``` - -### queues.get - -`queues.get(name): Queue` - -Returns the queue for the given name. If the queue does not exist an exception -is thrown instead. - -The queue will be looked up in the current database. - -**Arguments** - -* **name**: `string` - - Name of the queue to fetch. - -**Examples** - -If the queue does not yet exist an exception is thrown: - -```js -queues.get("some-queue"); -// Error: Queue does not exist: some-queue -// at ... 
-``` - -Otherwise the queue will be returned: - -```js -const queue1 = queues.create("some-queue"); -const queue2 = queues.get("some-queue"); -assertEqual(queue1, queue2); -``` - -### queues.delete - -`queues.delete(name): boolean` - -Returns `true` if the queue was deleted successfully. -If the queue did not exist, it returns `false` instead. -The queue will be looked up and deleted in the current database. - -When a queue is deleted, jobs on that queue will no longer be executed. - -Deleting a queue will not delete any jobs on that queue. - -**Arguments** - -* **name**: `string` - - Name of the queue to delete. - -**Examples** - -```js -const queue = queues.create("my-queue"); -queues.delete("my-queue"); // true -queues.delete("my-queue"); // false -``` - -Queue API ---------- - -### queue.push - -`queue.push(script, data, [opts]): string` - -The job will be added to the specified queue in the current database. - -Returns the job id. - -**Arguments** - -* **script**: `object` - - A job type definition, consisting of an object with the following properties: - - * **name**: `string` - - Name of the script that will be invoked. - - * **mount**: `string` - - Mount path of the service that defines the script. - - * **backOff**: `Function | number` (Default: `1000`) - - Either a function that takes the number of times the job has failed before - as input and returns the number of milliseconds to wait before trying the - job again, or the delay to be used to calculate an - [exponential back-off](https://en.wikipedia.org/wiki/Exponential_backoff), - or `0` for no delay. - - * **maxFailures**: `number | Infinity` (Default: `0`): - - Number of times a single run of a job will be re-tried before it is marked - as `"failed"`. A negative value or `Infinity` means that the job will be - re-tried on failure indefinitely. - - * **schema**: `Schema` (optional) - - Schema to validate a job's data against before enqueuing the job. - - * **preprocess**: `Function` (optional) - - Function to pre-process a job's (validated) data before serializing it in the queue. - -* **data**: `any` - - Job data of the job; must be serializable to JSON. - -* **opts**: `object` (optional) - - Object with any of the following properties: - - * **success**: `Function` (optional) - - Function to be called after the job has been completed successfully. - - * **failure**: `Function` (optional) - - Function to be called after the job has failed too many times. - - * **delayUntil**: `number | Date` (Default: `Date.now()`) - - Timestamp in milliseconds (or `Date` instance) until which the execution of - the job should be delayed. - - * **backOff**: `Function | number` (Default: `1000`) - - See *script.backOff*. - - * **maxFailures**: `number | Infinity` (Default: `0`): - - See *script.maxFailures*. - - * **repeatTimes**: `number` (Default: `0`) - - If set to a positive number, the job will be repeated this many times - (not counting recovery when using *maxFailures*). - If set to a negative number or `Infinity`, the job will be repeated - indefinitely. If set to `0` the job will not be repeated. - - * **repeatUntil**: `number | Date` (optional) - - If the job is set to automatically repeat, this can be set to a timestamp - in milliseconds (or `Date` instance) after which the job will no longer repeat. - Setting this value to zero, a negative value or `Infinity` has no effect. 
- - * **repeatDelay**: `number` (Default: `0`) - - If the job is set to automatically repeat, this can be set to a non-negative - value to set the number of milliseconds for which the job will be delayed - before it is started again. - -Note that if you pass a function for the *backOff* calculation, *success* -callback or *failure* callback options the function will be serialized to -the database as a string and therefore must not rely on any external scope -or external variables. - -When the job is set to automatically repeat, the *failure* callback will only -be executed when a run of the job has failed more than *maxFailures* times. -Note that if the job fails and *maxFailures* is set, it will be rescheduled -according to the *backOff* until it has either failed too many times or -completed successfully before being scheduled according to the *repeatDelay* -again. Recovery attempts by *maxFailures* do not count towards *repeatTimes*. - -The *success* and *failure* callbacks receive the following arguments: - -* **result**: `any` - - The return value of the script for the current run of the job. - -* **jobData**: `any` - - The data passed to this method. - -* **job**: `object` - - ArangoDB document representing the job's current state. - -**Examples** - -Let's say we have an service mounted at `/mailer` that provides a script called `send-mail`: - -```js -'use strict'; -const queues = require('@arangodb/foxx/queues'); -const queue = queues.create('my-queue'); -queue.push( - {mount: '/mailer', name: 'send-mail'}, - {to: 'hello@example.com', body: 'Hello world'} -); -``` - -This will *not* work, because `log` was defined outside the callback function -(the callback must be serializable to a string): - -```js -// WARNING: THIS DOES NOT WORK! -'use strict'; -const queues = require('@arangodb/foxx/queues'); -const queue = queues.create('my-queue'); -const log = require('console').log; // outside the callback's function scope -queue.push( - {mount: '/mailer', name: 'send-mail'}, - {to: 'hello@example.com', body: 'Hello world'}, - {success: function () { - log('Yay!'); // throws 'log is not defined' - }} -); -``` - -Here's an example of a job that will be executed every 5 seconds until tomorrow: - -```js -'use strict'; -const queues = require('@arangodb/foxx').queues; -const queue = queues.create('my-queue'); -queue.push( - {mount: '/mailer', name: 'send-mail'}, - {to: 'hello@example.com', body: 'Hello world'}, - { - repeatTimes: Infinity, - repeatUntil: Date.now() + (24 * 60 * 60 * 1000), - repeatDelay: 5 * 1000 - } -); -``` - -### queue.get - -`queue.get(jobId): Job` - -Creates a proxy object representing a job with the given job id. - -The job will be looked up in the specified queue in the current database. - -Returns the job for the given jobId. Properties of the job object will be -fetched whenever they are referenced and can not be modified. - -**Arguments** - -* **jobId**: `string` - - The id of the job to create a proxy object for. - -**Examples** -```js -const jobId = queue.push({mount: '/logger', name: 'log'}, 'Hello World!'); -const job = queue.get(jobId); -assertEqual(job.id, jobId); -``` - -### queue.delete - -`queue.delete(jobId): boolean` - -Deletes a job with the given job id. -The job will be looked up and deleted in the specified queue in the current database. - -**Arguments** - -* **jobId**: `string` - - The id of the job to delete. - -Returns `true` if the job was deleted successfully. If the job did not exist -it returns `false` instead. 
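**Examples**

A minimal sketch, reusing the `/logger` service and its `log` script from the `queue.get` example above:

```js
const jobId = queue.push({mount: '/logger', name: 'log'}, 'Hello World!');
queue.delete(jobId); // true, the job existed and was removed
queue.delete(jobId); // false, the job no longer exists
```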
- -### queue.pending - -`queue.pending([script]): Array` - -Returns an array of job ids of jobs in the given queue with the status -`"pending"`, optionally filtered by the given job type. -The jobs will be looked up in the specified queue in the current database. - -**Arguments** - -* **script**: `object` (optional) - - An object with the following properties: - - * **name**: `string` - - Name of the script. - - * **mount**: `string` - - Mount path of the service defining the script. - -**Examples** - -```js -const logScript = {mount: '/logger', name: 'log'}; -queue.push(logScript, 'Hello World!', {delayUntil: Date.now() + 50}); -assertEqual(queue.pending(logScript).length, 1); -// 50 ms later... -assertEqual(queue.pending(logScript).length, 0); -assertEqual(queue.progress(logScript).length, 1); -// even later... -assertEqual(queue.progress(logScript).length, 0); -assertEqual(queue.complete(logScript).length, 1); -``` - -### queue.progress - -`queue.progress([script])` - -Returns an array of job ids of jobs in the given queue with the status -`"progress"`, optionally filtered by the given job type. -The jobs will be looked up in the specified queue in the current database. - -**Arguments** - -* **script**: `object` (optional) - - An object with the following properties: - - * **name**: `string` - - Name of the script. - - * **mount**: `string` - - Mount path of the service defining the script. - -### queue.complete - -`queue.complete([script]): Array` - -Returns an array of job ids of jobs in the given queue with the status -`"complete"`, optionally filtered by the given job type. -The jobs will be looked up in the specified queue in the current database. - -**Arguments** - -* **script**: `object` (optional) - - An object with the following properties: - - * **name**: `string` - - Name of the script. - - * **mount**: `string` - - Mount path of the service defining the script. - -### queue.failed - -`queue.failed([script]): Array` - -Returns an array of job ids of jobs in the given queue with the status -`"failed"`, optionally filtered by the given job type. -The jobs will be looked up in the specified queue in the current database. - -**Arguments** - -* **script**: `object` (optional) - - An object with the following properties: - - * **name**: `string` - - Name of the script. - - * **mount**: `string` - - Mount path of the service defining the script. - -### queue.all - -`queue.all([script]): Array` - -Returns an array of job ids of all jobs in the given queue, -optionally filtered by the given job type. -The jobs will be looked up in the specified queue in the current database. - -**Arguments** - -* **script**: `object` (optional) - - An object with the following properties: - - * **name**: `string` - - Name of the script. - - * **mount**: `string` - - Mount path of the service defining the script. - -Job API -------- - -### job.abort - -`job.abort(): void` - -Aborts a non-completed job. - -Sets a job's status to `"failed"` if it is not already `"complete"`, -without calling the job's *onFailure* callback. diff --git a/Documentation/Books/Manual/Foxx/Reference/Modules/README.md b/Documentation/Books/Manual/Foxx/Reference/Modules/README.md deleted file mode 100644 index c38df71d596b..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Modules/README.md +++ /dev/null @@ -1,63 +0,0 @@ -Related modules -=============== - -These are some of the modules outside of Foxx you will find useful when -writing Foxx services. 
- -Additionally there are modules providing some level of compatibility with -Node.js as well as a number of bundled NPM modules (like lodash and joi). -For more information on these modules see -[the JavaScript modules appendix](../../../Appendix/JavaScriptModules/README.md). - -The `@arangodb` module ----------------------- - -`require('@arangodb')` - -This module provides access to various ArangoDB internals as well as three of -the most important exports necessary to work with the database in Foxx: -`db`, `aql` and `errors`. - -You can find a full description of this module in the -[ArangoDB module appendix](../../../Appendix/JavaScriptModules/ArangoDB.md). - -The `@arangodb/locals` module ------------------------------ - -`require('@arangodb/locals')` - -This module provides a `context` object which is identical to the -[service context](../Context.md) of whichever module requires it. - -There is no advantage to using this module over the `module.context` variable -directly unless you're [using a tool like Webpack](../../Guides/Webpack.md) -to translate your code and can't use the `module` object Foxx provides directly. - -The `@arangodb/request` module ------------------------------- - -`require('@arangodb/request')` - -This module provides a function for making HTTP requests to external services. -Note that while this allows communicating with third-party services it may -affect database performance by blocking Foxx requests as ArangoDB waits for -the remote service to respond. If you routinely make requests to slow external -services and are not directly interested in the response it is probably a -better idea to delegate the actual request/response cycle to a gateway service -running outside ArangoDB. - -You can find a full description of this module in the -[request module appendix](../../../Appendix/JavaScriptModules/Request.md). - -The `@arangodb/general-graph` module ------------------------------------- - -`require('@arangodb/general-graph')` - -This module provides access to ArangoDB graph definitions and various low-level -graph operations in JavaScript. For more complex queries it is probably better -to use AQL but this module can be useful in your setup and teardown scripts to -create and destroy graph definitions. - -For more information see the chapter on the -[general graph module](../../../Graphs/GeneralGraphs/README.md). diff --git a/Documentation/Books/Manual/Foxx/Reference/README.md b/Documentation/Books/Manual/Foxx/Reference/README.md deleted file mode 100644 index 15a32f94e660..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Foxx reference -============== - -Each Foxx service is defined by a [JSON manifest](Manifest.md) -specifying the entry point, any scripts defined by the service, -possible [configuration](Configuration.md) options and Foxx dependencies, -as well as other metadata. Within a service, these options are exposed as the -[service context](Context.md), which is also used to mount -[routers](Routers/README.md) defining the service's API endpoints. - -Foxx also provides a number of [utility modules](Modules/README.md) -as well as a flexible [sessions middleware](Sessions/README.md) -with different transport and storage mechanisms. - -Foxx services can be installed and managed over the Web-UI or through -ArangoDB's [HTTP API](../../../HTTP/Foxx/Management.html). 
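To make the relationship between these building blocks concrete, here is a minimal sketch of a service entry point that combines the service context, a router and the `@arangodb` module described above. The collection name `docs` and the route path are illustrative and assume the collection is created by the service's setup script:

```js
'use strict';
const { db, aql } = require('@arangodb');
const createRouter = require('@arangodb/foxx/router');

const router = createRouter();
module.context.use(router);

// The service context provides access to the service's prefixed collections;
// the router defines the HTTP endpoints exposed by the service.
router.get('/count', function (req, res) {
  const docs = module.context.collection('docs');
  const count = db._query(aql`
    RETURN LENGTH(${docs})
  `).toArray()[0];
  res.json({ count });
});
```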
diff --git a/Documentation/Books/Manual/Foxx/Reference/Routers/Endpoints.md b/Documentation/Books/Manual/Foxx/Reference/Routers/Endpoints.md deleted file mode 100644 index f9505555aaac..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Routers/Endpoints.md +++ /dev/null @@ -1,484 +0,0 @@ -Endpoints -========= - -Endpoints are returned by the `use`, `all` -and HTTP verb (e.g. `get`, `post`) methods of [routers](./README.md) -as well as the `use` method of the [service context](../Context.md). -They can be used to attach metadata to mounted routes, middleware and -child routers that affects how requests and responses are processed or -provides API documentation. - -Endpoints should only be used to invoke the following methods. -Endpoint methods can be chained together (each method returns the endpoint itself). - -header ------- - -`endpoint.header(name, [schema], [description]): this` - -Defines a request header recognized by the endpoint. -Any additional non-defined headers will be treated as optional string values. -The definitions will also be shown in the route details in the API documentation. - -If the endpoint is a child router, all routes of that router will use this -header definition unless overridden. - - -**Arguments** - -* **name**: `string` - - Name of the header. This should be considered case insensitive as all header - names will be converted to lowercase. - -* **schema**: `Schema` (optional) - - A schema describing the format of the header value. This can be a joi schema - or anything that has a compatible `validate` method. - - The value of this header will be set to the `value` property of the - validation result. A validation failure will result in an automatic 400 - (Bad Request) error response. - -* **description**: `string` (optional) - - A human readable string that will be shown in the API documentation. - -Returns the endpoint. - -**Examples** - -```js -router.get(/* ... */) -.header('arangoVersion', joi.number().min(30000).default(30000)); -``` - -pathParam ---------- - -`endpoint.pathParam(name, [schema], [description]): this` - -Defines a path parameter recognized by the endpoint. -Path parameters are expected to be filled as part of the endpoint's mount path. -Any additional non-defined path parameters will be treated as optional -string values. The definitions will also be shown in the route details in -the API documentation. - -If the endpoint is a child router, all routes of that router will use this -parameter definition unless overridden. - -**Arguments** - -* **name**: `string` - - Name of the parameter. - -* **schema**: `Schema` (optional) - - A schema describing the format of the parameter. This can be a joi schema - or anything that has a compatible `validate` method. - - The value of this parameter will be set to the `value` property of the - validation result. A validation failure will result in the route failing to - match and being ignored (resulting in a 404 (Not Found) error response if no - other routes match). - -* **description**: `string` (optional) - - A human readable string that will be shown in the API documentation. - -Returns the endpoint. - -**Examples** - -```js -router.get('/some/:num/here', /* ... */) -.pathParam('num', joi.number().required()); -``` - -queryParam ----------- - -`endpoint.queryParam(name, [schema], [description]): this` - -Defines a query parameter recognized by the endpoint. -Any additional non-defined query parameters will be treated as optional -string values. 
The definitions will also be shown in the route details in -the API documentation. - -If the endpoint is a child router, all routes of that router will use this -parameter definition unless overridden. - -**Arguments** - -* **name**: `string` - - Name of the parameter. - -* **schema**: `Schema` (optional) - - A schema describing the format of the parameter. This can be a joi schema or - anything that has a compatible `validate` method. - - The value of this parameter will be set to the `value` property of the - validation result. A validation failure will result in an automatic 400 - (Bad Request) error response. - -* **description**: `string` (optional) - - A human readable string that will be shown in the API documentation. - -Returns the endpoint. - -**Examples** - -```js -router.get(/* ... */) -.queryParam('num', joi.number().required()); -``` - -body ----- - -`endpoint.body([model], [mimes], [description]): this` - -Defines the request body recognized by the endpoint. -There can only be one request body definition per endpoint. The definition will -also be shown in the route details in the API documentation. - -In the absence of a request body definition, the request object's *body* -property will be initialized to the unprocessed *rawBody* buffer. - -If the endpoint is a child router, all routes of that router will use this body -definition unless overridden. If the endpoint is a middleware, the request body -will only be parsed once (i.e. the MIME types of the route matching the same -request will be ignored but the body will still be validated again). - -**Arguments** - -* **model**: `Model | Schema | null` (optional) - - A model or joi schema describing the request body. A validation failure will - result in an automatic 400 (Bad Request) error response. - - If the value is a model with a `fromClient` method, that method will be - applied to the parsed request body. - - If the value is a schema or a model with a schema, the schema will be used - to validate the request body and the `value` property of the validation - result of the parsed request body will be used instead of the parsed request - body itself. - - If the value is a model or a schema and the MIME type has been omitted, - the MIME type will default to JSON instead. - - If the value is explicitly set to `null`, no request body will be expected. - - If the value is an array containing exactly one model or schema, the request - body will be treated as an array of items matching that model or schema. - -* **mimes**: `Array` (optional) - - An array of MIME types the route supports. - - Common non-mime aliases like "json" or "html" are also supported and will be - expanded to the appropriate MIME type (e.g. "application/json" and "text/html"). - - If the MIME type is recognized by Foxx the request body will be parsed into - the appropriate structure before being validated. Currently only JSON, - `application/x-www-form-urlencoded` and multipart formats are supported in this way. - - If the MIME type indicated in the request headers does not match any of the - supported MIME types, the first MIME type in the list will be used instead. - - Failure to parse the request body will result in an automatic 400 - (Bad Request) error response. - -* **description**: `string` (optional) - - A human readable string that will be shown in the API documentation. - -Returns the endpoint. - -**Examples** - -```js -router.post('/expects/some/json', /* ... */) -.body( - joi.object().required(), - 'This implies JSON.' 
-); - -router.post('/expects/nothing', /* ... */) -.body(null); // No body allowed - -router.post('/expects/some/plaintext', /* ... */) -.body(['text/plain'], 'This body will be a string.'); -``` - -response --------- - -`endpoint.response([status], [model], [mimes], [description]): this` - -Defines a response body for the endpoint. When using the response object's -`send` method in the request handler of this route, the definition with the -matching status code will be used to generate the response body. -The definitions will also be shown in the route details in the API documentation. - -If the endpoint is a child router, all routes of that router will use this -response definition unless overridden. If the endpoint is a middleware, -this method has no effect. - -**Arguments** - -* **status**: `number | string` (Default: `200` or `204`) - - HTTP status code the response applies to. If a string is provided instead of - a numeric status code it will be used to look up a numeric status code using - the [statuses](https://github.com/jshttp/statuses) module. - -* **model**: `Model | Schema | null` (optional) - - A model or joi schema describing the response body. - - If the value is a model with a `forClient` method, that method will be - applied to the data passed to `response.send` within the route if the - response status code matches (but also if no status code has been set). - - If the value is a schema or a model with a schema, the actual schema will - not be used to validate the response body and only serves to document the - response in more detail in the API documentation. - - If the value is a model or a schema and the MIME type has been omitted, - the MIME type will default to JSON instead. - - If the value is explicitly set to `null` and the status code has been omitted, - the status code will default to `204` ("no content") instead of `200`. - - If the value is an array containing exactly one model or schema, the response - body will be an array of items matching that model or schema. - -* **mimes**: `Array` (optional) - - An array of MIME types the route might respond with for this status code. - - Common non-mime aliases like "json" or "html" are also supported and will be - expanded to the appropriate MIME type (e.g. "application/json" and "text/html"). - - When using the `response.send` method the response body will be converted to - the appropriate MIME type if possible. - -* **description**: `string` (optional) - - A human-readable string that briefly describes the response and will be shown - in the endpoint's detailed documentation. - -Returns the endpoint. - -**Examples** - -```js -// This example only provides documentation -// and implies a generic JSON response body. -router.get(/* ... */) -.response( - joi.array().items(joi.string()), - 'A list of doodad identifiers.' -); - -// No response body will be expected here. -router.delete(/* ... */) -.response(null, 'The doodad no longer exists.'); - -// An endpoint can define multiple response types -// for different status codes -- but never more than -// one for each status code. -router.post(/* ... */) -.response('found', 'The doodad is located elsewhere.') -.response(201, ['text/plain'], 'The doodad was created so here is a haiku.'); - -// Here the response body will be set to -// the querystring-encoded result of -// FormModel.forClient({some: 'data'}) -// because the status code defaults to 200. -router.patch(function (req, res) { - // ... 
- res.send({some: 'data'}); -}) -.response(FormModel, ['application/x-www-form-urlencoded'], 'OMG.'); - -// In this case the response body will be set to -// SomeModel.forClient({some: 'data'}) because -// the status code has been set to 201 before. -router.put(function (req, res) { - // ... - res.status(201); - res.send({some: 'data'}); -}) -.response(201, SomeModel, 'Something amazing happened.'); -``` - -error ------ - -`endpoint.error(status, [description]): this` - -Documents an error status for the endpoint. - -If the endpoint is a child router, all routes of that router will use this -error description unless overridden. If the endpoint is a middleware, -this method has no effect. - -This method only affects the generated API documentation and has not other -effect within the service itself. - -**Arguments** - -* **status**: `number | string` - - HTTP status code for the error (e.g. `400` for "bad request"). If a string is - provided instead of a numeric status code it will be used to look up a numeric - status code using the [statuses](https://github.com/jshttp/statuses) module. - -* **description**: `string` (optional) - - A human-readable string that briefly describes the error condition and will - be shown in the endpoint's detailed documentation. - -Returns the endpoint. - -**Examples** - -```js -router.get(function (req, res) { - // ... - res.throw(403, 'Validation error at x.y.z'); -}) -.error(403, 'Indicates that a validation has failed.'); -``` - -summary -------- - -`endpoint.summary(summary): this` - -Adds a short description to the endpoint's API documentation. - -If the endpoint is a child router, all routes of that router will use this -summary unless overridden. If the endpoint is a middleware, this method has no effect. - -This method only affects the generated API documentation and has not other -effect within the service itself. - -**Arguments** - -* **summary**: `string` - - A human-readable string that briefly describes the endpoint and will appear - next to the endpoint's path in the documentation. - -Returns the endpoint. - -**Examples** - -```js -router.get(/* ... */) -.summary('List all discombobulated doodads') -``` - -description ------------ - -`endpoint.description(description): this` - -Adds a long description to the endpoint's API documentation. - -If the endpoint is a child router, all routes of that router will use -this description unless overridden. If the endpoint is a middleware, -this method has no effect. - -This method only affects the generated API documentation and has not -other effect within the service itself. - -**Arguments** - -* **description**: `string` - - A human-readable string that describes the endpoint in detail and - will be shown in the endpoint's detailed documentation. - -Returns the endpoint. - -**Examples** - -```js -// The "dedent" library helps formatting -// multi-line strings by adjusting indentation -// and removing leading and trailing blank lines -const dd = require('dedent'); -router.post(/* ... */) -.description(dd` - This route discombobulates the doodads by - frobnicating the moxie of the request body. -`) -``` - -deprecated ----------- - -`endpoint.deprecated([deprecated]): this` - -Marks the endpoint as deprecated. - -If the endpoint is a child router, all routes of that router will also be -marked as deprecated. If the endpoint is a middleware, this method has no effect. - -This method only affects the generated API documentation and has not other -effect within the service itself. 
- -**Arguments** - -* **deprecated**: `boolean` (Default: `true`) - - Whether the endpoint should be marked as deprecated. If set to `false` the - endpoint will be explicitly marked as *not* deprecated. - -Returns the endpoint. - -**Examples** - -```js -router.get(/* ... */) -.deprecated(); -``` - - -tag ---- - -`endpoint.tag(...tags): this` - -Marks the endpoint with the given tags that will be used to group related -routes in the generated API documentation. - -If the endpoint is a child router, all routes of that router will also be -marked with the tags. If the endpoint is a middleware, this method has no effect. - -This method only affects the generated API documentation and has not other -effect within the service itself. - -**Arguments** - -* **tags**: `string` - - One or more strings that will be used to group the endpoint's routes. - -Returns the endpoint. - -**Examples** - -```js -router.get(/* ... */) -.tag('auth', 'restricted'); -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Routers/Middleware.md b/Documentation/Books/Manual/Foxx/Reference/Routers/Middleware.md deleted file mode 100644 index 2646c8179131..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Routers/Middleware.md +++ /dev/null @@ -1,83 +0,0 @@ -Middleware -========== - -Middleware in Foxx refers to functions that can be mounted like routes and can -manipulate the request and response objects before and after the route itself -is invoked. They can also be used to control access or to provide common logic -like logging etc. Unlike routes, middleware is mounted with the `use` method -like a router. - -Instead of a function the `use` method can also accept an object with a -`register` function that will take a parameter `endpoint`, the middleware will -be mounted at and returns the actual middleware function. This allows -manipulating the endpoint before creating the middleware (e.g. to document -headers, request bodies, path parameters or query parameters). 
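
As a minimal sketch of this object form (the `x-trace-id` header name and its echo-back behavior are assumptions for illustration only), `register` receives the endpoint the middleware will be mounted at, may attach documentation or validation to it, and returns the actual middleware function; the Examples below show a fuller session-handling variant of the same pattern:

```js
const joi = require('joi');

module.context.use({
  register (endpoint) {
    // Document and validate a header on the endpoint before the middleware exists.
    endpoint.header('x-trace-id', joi.string().optional(), 'Optional trace ID.');
    return function (req, res, next) {
      // Hypothetical behavior: echo the trace ID back to the client if one was sent.
      const traceId = req.get('x-trace-id');
      if (traceId) {
        res.set('x-trace-id', traceId);
      }
      next();
    };
  }
});
```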
- -**Examples** - -Restrict access to ArangoDB-authenticated users: - -```js -module.context.use(function (req, res, next) { - if (!req.arangoUser) { - res.throw(401, 'Not authenticated with ArangoDB'); - } - next(); -}); -``` - -Any truthy argument passed to the `next` function will be thrown as an error: - -```js -module.context.use(function (req, res, next) { - let err = null; - if (!req.arangoUser) { - err = new Error('This should never happen'); - } - next(err); // throws if the error was set -}) -``` - -Trivial logging middleware: - -```js -module.context.use(function (req, res, next) { - const start = Date.now(); - try { - next(); - } finally { - console.log(`Handled request in ${Date.now() - start}ms`); - } -}); -``` - -More complex example for header-based sessions: - -```js -const sessions = module.context.collection('sessions'); -module.context.use({ - register (endpoint) { - endpoint.header('x-session-id', joi.string().optional(), 'The session ID.'); - return function (req, res, next) { - const sid = req.get('x-session-id'); - if (sid) { - try { - req.session = sessions.document(sid); - } catch (e) { - delete req.headers['x-session-id']; - } - } - next(); - if (req.session) { - if (req.session._rev) { - sessions.replace(req.session, req.session); - res.set('x-session-id', req.session._key); - } else { - const meta = sessions.save(req.session); - res.set('x-session-id', meta._key); - } - } - }; - } -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Routers/README.md b/Documentation/Books/Manual/Foxx/Reference/Routers/README.md deleted file mode 100644 index 35d8cb85b06b..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Routers/README.md +++ /dev/null @@ -1,157 +0,0 @@ -Routers -======= - -`const createRouter = require('@arangodb/foxx/router');` - -Routers let you define routes that extend ArangoDB's HTTP API with custom endpoints. - -Routers need to be mounted using the `use` method of a -[service context](../Context.md) to expose their HTTP routes at a service's mount path. - -You can pass routers between services mounted in the same database -[as dependencies](../../Guides/Dependencies.md). You can even nest routers -within each other. - -Creating a router ------------------ - -`createRouter(): Router` - -This returns a new, clean router object that has not yet been mounted in the -service and can be exported like any other object. - -Request handlers ----------------- - -`router.get([path], [...middleware], handler, [name]): Endpoint` - -`router.post([path], [...middleware], handler, [name]): Endpoint` - -`router.put([path], [...middleware], handler, [name]): Endpoint` - -`router.patch([path], [...middleware], handler, [name]): Endpoint` - -`router.delete([path], [...middleware], handler, [name]): Endpoint` - -`router.all([path], [...middleware], handler, [name]): Endpoint` - -These methods let you specify routes on the router. -The `all` method defines a route that will match any supported HTTP verb, the -other methods define routes that only match the HTTP verb with the same name. - -**Arguments** - -* **path**: `string` (Default: `"/"`) - - The path of the request handler relative to the base path the Router is mounted at. - If omitted, the request handler will handle requests to the base path of the Router. - For information on defining dynamic routes see the section on - [path parameters in the chapter on router endpoints](Endpoints.md#pathparam). 
- -* **middleware**: `Function` (optional) - - Zero or more middleware functions that take the following arguments: - - * **req**: `Request` - - An incoming server request object. - - * **res**: `Response` - - An outgoing server response object. - - * **next**: `Function` - - A callback that passes control over to the next middleware function - and returns when that function has completed. - - If a truthy argument is passed, that argument will be thrown as an error. - - If there is no next middleware function, the `handler` will be - invoked instead (see below). - -* **handler**: `Function` - - A function that takes the following arguments: - - * **req**: `Request` - - An incoming server request object. - - * **res**: `Response` - - An outgoing server response. - -* **name**: `string` (optional) - - A name that can be used to generate URLs for the endpoint. - For more information see the `reverse` method of the [request object](Request.md). - -Returns an [Endpoint](Endpoints.md) for the route. - -**Examples** - -Simple index route: - -```js -router.get(function (req, res) { - res.set('content-type', 'text/plain'); - res.write('Hello World!'); -}); -``` - -Restricting access to authenticated ArangoDB users: - -```js -router.get('/secrets', function (req, res, next) { - if (req.arangoUser) { - next(); - } else { - res.throw(404, 'Secrets? What secrets?'); - } -}, function (req, res) { - res.download('allOurSecrets.zip'); -}); -``` - -Multiple middleware functions: - -```js -function counting (req, res, next) { - if (!req.counter) req.counter = 0; - req.counter++; - next(); - req.counter--; -} -router.get(counting, counting, counting, function (req, res) { - res.json({counter: req.counter}); // {"counter": 3} -}); -``` - -Mounting child routers and middleware -------------------------------------- - -`router.use([path], middleware, [name]): Endpoint` - -The `use` method lets you mount a child router or middleware at a given path. - -**Arguments** - -* **path**: `string` (optional) - - The path of the middleware relative to the base path the Router is mounted at. - If omitted, the middleware will handle requests to the base path of the Router. - For information on defining dynamic routes see the section on - [path parameters in the chapter on router endpoints](Endpoints.md#pathparam). - -* **middleware**: `Router | Middleware` - - An unmounted router object or a [middleware](Middleware.md). - -* **name**: `string` (optional) - - A name that can be used to generate URLs for endpoints of this router. - For more information see the `reverse` method of the [request object](Request.md). - Has no effect if *handler* is a Middleware. - -Returns an [Endpoint](Endpoints.md) for the middleware or child router. diff --git a/Documentation/Books/Manual/Foxx/Reference/Routers/Request.md b/Documentation/Books/Manual/Foxx/Reference/Routers/Request.md deleted file mode 100644 index 6260b95c72b2..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Routers/Request.md +++ /dev/null @@ -1,414 +0,0 @@ -Request objects -=============== - -The request object specifies the following properties: - -* **arangoUser**: `string | null` - - The authenticated ArangoDB username used to make the request. - This value is only set if authentication is enabled in ArangoDB and the - request set an `authorization` header ArangoDB was able to verify. 
- You are strongly encouraged to implement - [your own authentication logic](../../Guides/Auth.md) for your own services - but this property can be useful if you need to integrate with ArangoDB's - own authentication mechanisms. - -* **arangoVersion**: `number` - - The numeric value of the `x-arango-version` header or the numeric version - of the ArangoDB server (e.g. `30102` for version 3.1.2) if no valid header - was provided. - -* **auth**: `object | null` - - The credentials supplied in the `authorization` header if any. - - If the request uses basic authentication, the value is an object like - `{basic: {username: string}}` or - `{basic: {username: string, password: string}}` or - `{basic: {}}` (if the credentials were malformed or empty). - - If the request uses bearer authentication, the value is an object like - `{bearer: string}`. - -* **baseUrl**: `string` - - Root-relative base URL of the service, i.e. the prefix `"/_db/"` followed - by the value of *database*. - -* **body**: `any` - - The processed and validated request body for the current route. - If no body has been defined for the current route, the value will be - identical to *rawBody*. - - For details on how request bodies can be processed and validated by Foxx - see the [body method of the endpoint object](Endpoints.md#body). - -* **context**: `Context` - - The [service context](../Context.md) in which the router was mounted - (rather than the context in which the route was defined). - -* **database**: `string` - - The name of the database in which the request is being handled, e.g. `"_system"`. - -* **headers**: `object` - - The raw headers object. - - For details on how request headers can be validated by Foxx see the - [header method of the endpoint object](Endpoints.md#header). - -* **hostname**: `string` - - The hostname (domain name) indicated in the request headers. - - Defaults to the hostname portion (i.e. excluding the port) of the `Host` - header and falls back to the listening address of the server. - -* **method**: `string` - - The HTTP verb used to make the request, e.g. `"GET"`. - -* **originalUrl**: `string` - - Root-relative URL of the request, i.e. *path* followed by the raw query - parameters, if any. - -* **path**: `string` - - Database-relative path of the request URL (not including the query parameters). - -* **pathParams**: `object` - - An object mapping the names of path parameters of the current route to - their validated values. - - For details on how path parameters can be validated by Foxx see the - [pathParam method of the endpoint object](Endpoints.md#pathparam). - -* **port**: `number` - - The port indicated in the request headers. - - Defaults to the port portion (i.e. excluding the hostname) of the `Host` - header and falls back to the listening port or the appropriate default - port (`443` for HTTPS or `80` for HTTP, depending on *secure*) if the - header only indicates a hostname. - - If the request was made using a trusted proxy (see *trustProxy*), - this is set to the port portion of the `X-Forwarded-Host` header - (or appropriate default port) if present. - -* **protocol**: `string` - - The protocol used for the request. - - Defaults to `"https"` or `"http"` depending on whether ArangoDB is - configured to use SSL or not. - - If the request was made using a trusted proxy (see *trustProxy*), - this is set to the value of the `X-Forwarded-Proto` header if present. 
- -* **queryParams**: `object` - - An object mapping the names of query parameters of the current route to - their validated values. - - For details on how query parameters can be validated by Foxx see the - [queryParam method of the endpoint object](Endpoints.md#queryparam). - -* **rawBody**: `Buffer` - - The raw, unparsed, unvalidated request body as a buffer. - -* **remoteAddress**: `string` - - The IP of the client that made the request. - - If the request was made using a trusted proxy (see *trustProxy*), - this is set to the first IP listed in the `X-Forwarded-For` header if present. - -* **remoteAddresses**: `Array` - - A list containing the IP addresses used to make the request. - - Defaults to the value of *remoteAddress* wrapped in an array. - - If the request was made using a trusted proxy (see *trustProxy*), - this is set to the list of IPs specified in the `X-Forwarded-For` header if present. - -* **remotePort**: `number` - - The listening port of the client that made the request. - - If the request was made using a trusted proxy (see *trustProxy*), - this is set to the port specified in the `X-Forwarded-Port` header if present. - -* **secure**: `boolean` - - Whether the request was made over a secure connection (i.e. HTTPS). - - This is set to `false` when *protocol* is `"http"` and `true` when - *protocol* is `"https"`. - -* **suffix**: `string` - - The trailing path relative to the current route if the current route ends - in a wildcard (e.g. `/something/*`). - - **Note**: Starting with ArangoDB 3.2 is passed into the service as-is, i.e. - percentage escape sequences like `%2F` will no longer be unescaped. - Also note that the suffix may contain path segments like `..` which may have - special meaning if the suffix is used to build filesystem paths. - -* **trustProxy**: `boolean` - - Indicates whether the request was made using a trusted proxy. - If the origin server's address was specified in the ArangoDB configuration - using `--frontend.trusted-proxy` or the service's `trustProxy` setting is - enabled, this will be `true`, otherwise it will be `false`. - -* **url**: `string` - - The URL of the request. - -* **xhr**: `boolean` - - Whether the request indicates it was made within a browser using AJAX. - - This is set to `true` if the `X-Requested-With` header is present and is - a case-insensitive match for the value `"xmlhttprequest"`. - - Note that this value does not guarantee whether the request was made from - inside a browser or whether AJAX was used and is merely a convention - established by JavaScript frameworks like jQuery. - -accepts -------- - -`req.accepts(types): string | false` - -`req.accepts(...types): string | false` - -`req.acceptsCharsets(charsets): string | false` - -`req.acceptsCharsets(...charsets): string | false` - -`req.acceptsEncodings(encodings): string | false` - -`req.acceptsEncodings(...encodings): string | false` - -`req.acceptsLanguages(languages): string | false` - -`req.acceptsLanguages(...languages): string | false` - -These methods wrap the corresponding content negotiation methods of the -[accepts module](https://github.com/jshttp/accepts) for the current request. - -**Examples** - -```js -if (req.accepts(['json', 'html']) === 'html') { - // Client explicitly prefers HTML over JSON - res.write('
<p>Client prefers HTML</p>
'); -} else { - // Otherwise just send JSON - res.json({success: true}); -} -``` - -cookie ------- - -`req.cookie(name, options): string | null` - -Gets the value of a cookie by name. - -**Arguments** - -* **name**: `string` - - Name of the cookie. - -* **options**: `object` (optional) - - An object with any of the following properties: - - * **secret**: `string` (optional) - - Secret that was used to sign the cookie. - - If a secret is specified, the cookie's signature is expected to be present - in a second cookie with the same name and the suffix `.sig`. - Otherwise the signature (if present) will be ignored. - - * **algorithm**: `string` (Default: `"sha256"`) - - Algorithm that was used to sign the cookie. - -If a string is passed instead of an options object it will be interpreted as -the *secret* option. - -Returns the value of the cookie or `null` if the cookie is not set or its -signature is invalid. - -get / header ------------- - -`req.get(name): string` - -`req.header(name): string` - -Gets the value of a header by name. You can validate request headers using the -[header method of the endpoint](Endpoints.md#header). - -**Arguments** - -* **name**: `string` - - Name of the header. - -Returns the header value. - -is --- - -`req.is(types): string` - -`req.is(...types): string` - -This method wraps the (request body) content type detection method of the -[type-is module](https://github.com/jshttp/type-is) for the current request. - -**Examples** - -```js -const type = req.is('html', 'application/xml', 'application/*+xml'); -if (type === false) { // no match - handleDefault(req.rawBody); -} else if (type === 'html') { - handleHtml(req.rawBody); -} else { // is XML - handleXml(req.rawBody); -} -``` - -json ----- - -`req.json(): any` - -Attempts to parse the raw request body as JSON and returns the result. - -It is generally more useful to define a -[request body on the endpoint](Endpoints.md#body) and use the `req.body` -property instead. - -Returns `undefined` if the request body is empty. May throw a `SyntaxError` -if the body could not be parsed. - -makeAbsolute ------------- - -`req.makeAbsolute(path, [query]): string` - -Resolves the given path relative to the `req.context.service`'s mount path -to a full URL. - -**Arguments** - -* **path**: `string` - - The path to resovle. - -* **query**: `string | object` - - A string or object with query parameters to add to the URL. - -Returns the formatted absolute URL. - -params ------- - -`req.param(name): any` - -**Arguments** - -Looks up a parameter by name, preferring `pathParams` over `queryParams`. - -It's probably better style to use the `req.pathParams` or `req.queryParams` -objects directly. - -* **name**: `string` - - Name of the parameter. - -Returns the (validated) value of the parameter. - -range ------ - -`req.range([size]): Ranges | number` - -This method wraps the range header parsing method of the -[range-parser module](https://github.com/jshttp/range-parser) for the current request. - -**Arguments** - -* **size**: `number` (Default: `Infinity`) - - Length of the satisfiable range (e.g. number of bytes in the full response). - If present, ranges exceeding the size will be considered unsatisfiable. - -Returns `undefined` if the `Range` header is absent, `-2` if the header is -present but malformed, `-1` if the range is invalid (e.g. start offset is -larger than end offset) or unsatisfiable for the given size. - -Otherwise returns an array of objects with the properties *start* and *end* -values for each range. 
The array has an additional property *type* indicating -the request range type. - -**Examples** - -```js -console.log(req.headers.range); // "bytes=40-80" -const ranges = req.range(100); -console.log(ranges); // [{start: 40, end: 80}] -console.log(ranges.type); // "bytes" -``` - -reverse -------- - -`req.reverse(name, [params]): string` - -Looks up the URL of a named route for the given parameters. - -**Arguments** - -* **name**: `string` - - Name of the route to look up. - -* **params**: `object` (optional) - - An object containing values for the (path or query) parameters of the route. - -Returns the URL of the route for the given parameters. - -**Examples** - -```js -router.get('/items/:id', function (req, res) { - /* ... */ -}, 'getItemById'); - -router.post('/items', function (req, res) { - // ... - const url = req.reverse('getItemById', {id: createdItem._key}); - res.set('location', req.makeAbsolute(url)); -}); -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Routers/Response.md b/Documentation/Books/Manual/Foxx/Reference/Routers/Response.md deleted file mode 100644 index 76e82df1e286..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Routers/Response.md +++ /dev/null @@ -1,478 +0,0 @@ -Response objects -================ - -The response object specifies the following properties: - -* **body**: `Buffer | string` - - Response body as a string or buffer. Can be set directly or using some - of the response methods. - -* **context**: `Context` - - The [service context](../Context.md) in which the router was mounted - (rather than the context in which the route was defined). - -* **headers**: `object` - - The raw headers object. - -* **statusCode**: `number` - - Status code of the response. Defaults to `200` (body set and not an empty - string or buffer) or `204` (otherwise) if not changed from `undefined`. - -attachment ----------- - -`res.attachment([filename]): this` - -Sets the `content-disposition` header to indicate the response is a -downloadable file with the given name. - -**Note:** This does not actually modify the response body or access the -file system. To send a file from the file system see the `download` or -`sendFile` methods. - -**Arguments** - -* **filename**: `string` (optional) - - Name of the downloadable file in the response body. - - If present, the extension of the filename will be used to set the response - `content-type` if it has not yet been set. - -Returns the response object. - -cookie ------- - -`res.cookie(name, value, [options]): this` - -Sets a cookie with the given name. - -**Arguments** - -* **name**: `string` - - Name of the cookie. - -* **value**: `string` - - Value of the cookie. - -* **options**: `object` (optional) - - An object with any of the following properties: - - * **ttl**: `number` (optional) - - Time to live of the cookie in seconds. - - * **algorithm**: `string` (Default: `"sha256"`) - - Algorithm that will be used to sign the cookie. - - * **secret**: `string` (optional) - - Secret that will be used to sign the cookie. - - If a secret is specified, the cookie's signature will be stored in a second - cookie with the same options, the same name and the suffix `.sig`. - Otherwise no signature will be added. - - * **path**: `string` (optional) - - Path for which the cookie should be issued. - - * **domain**: `string` (optional) - - Domain for which the cookie should be issued. - - * **secure**: `boolean` (Default: `false`) - - Whether the cookie should be marked as secure (i.e. HTTPS/SSL-only). 
- - * **httpOnly**: `boolean` (Default: `false`) - - Whether the cookie should be marked as HTTP-only (rather than also exposing - it to client-side code). - -If a string is passed instead of an options object it will be interpreted as -the *secret* option. - -If a number is passed instead of an options object it will be interpreted as -the *ttl* option. - -Returns the response object. - -download --------- - -`res.download(path, [filename]): this` - -The equivalent of calling `res.attachment(filename).sendFile(path)`. - -**Arguments** - -* **path**: `string` - - Path to the file on the local filesystem to be sent as the response body. - -* **filename**: `string` (optional) - - Filename to indicate in the `content-disposition` header. - - If omitted the *path* will be used instead. - -Returns the response object. - -getHeader ---------- - -`res.getHeader(name): string` - -Gets the value of the header with the given name. - -**Arguments** - -* **name**: `string` - - Name of the header to get. - -Returns the value of the header or `undefined`. - -json ----- - -`res.json(data): this` - -Sets the response body to the JSON string value of the given data. - -**Arguments** - -* **data**: `any` - - The data to be used as the response body. - -Returns the response object. - -redirect --------- - -`res.redirect([status], path): this` - -Redirects the response by setting the response `location` header and status code. - -**Arguments** - -* **status**: `number | string` (optional) - - Response status code to set. - - If the status code is the string value `"permanent"` it will be treated as - the value `301`. - - If the status code is a string it will be converted to a numeric status code - using the [statuses module](https://github.com/jshttp/statuses) first. - - If the status code is omitted but the response status has not already been - set, the response status will be set to `302`. - -* **path**: `string` - - URL to set the `location` header to. - -Returns the response object. - -removeHeader ------------- - -`res.removeHeader(name): this` - -Removes the header with the given name from the response. - -**Arguments** - -* **name**: `string` - - Name of the header to remove. - -Returns the response object. - -send ----- - -`res.send(data, [type]): this` - -Sets the response body to the given data with respect to the response -definition for the response's current status code. - -**Arguments** - -* **data**: `any` - - The data to be used as the response body. Will be converted according the - [response definition](Endpoints.md#response) for the response's current - status code (or `200`) in the following way: - - If the data is an ArangoDB result set, it will be converted to an array first. - - If the response definition specifies a model with a `forClient` method, - that method will be applied to the data first. If the data is an array and - the response definition has the `multiple` flag set, the method will be - applied to each entry individually instead. - - Finally the data will be processed by the response type handler to convert - the response body to a string or buffer. - -* **type**: `string` (Default: `"auto"`) - - Content-type of the response body. - - If set to `"auto"` the first MIME type specified in the - [response definition](Endpoints.md#response) for the response's current - status code (or `200`) will be used instead. 
- - If set to `"auto"` and no response definition exists, the MIME type will - be determined the following way: - - If the data is a buffer the MIME type will be set to binary - (`application/octet-stream`). - - If the data is an object the MIME type will be set to JSON and the data - will be converted to a JSON string. - - Otherwise the MIME type will be set to HTML and the data will be - converted to a string. - -Returns the response object. - -sendFile --------- - -`res.sendFile(path, [options]): this` - -Sends a file from the local filesystem as the response body. - -**Arguments** - -* **path**: `string` - - Path to the file on the local filesystem to be sent as the response body. - - If no `content-type` header has been set yet, the extension of the filename - will be used to set the value of that header. - -* **options**: `object` (optional) - - An object with any of the following properties: - - * **lastModified**: `boolean` (optional) - - If set to `true` or if no `last-modified` header has been set yet and the - value is not set to `false` the `last-modified` header will be set to the - modification date of the file in milliseconds. - -Returns the response object. - -**Examples** - -```js -// Send the file "favicon.ico" from this service's folder -res.sendFile(module.context.fileName('favicon.ico')); -``` - -sendStatus ----------- - -`res.sendStatus(status): this` - -Sends a plaintext response for the given status code. -The response status will be set to the given status code, the response body -will be set to the status message corresponding to that status code. - -**Arguments** - -* **status**: `number | string` - - Response status code to set. - - If the status code is a string it will be converted to a numeric status code - using the [statuses module](https://github.com/jshttp/statuses) first. - -Returns the response object. - -setHeader / set ---------------- - -`res.setHeader(name, value): this` - -`res.set(name, value): this` - -`res.set(headers): this` - -Sets the value of the header with the given name. - -**Arguments** - -* **name**: `string` - - Name of the header to set. - -* **value**: `string` - - Value to set the header to. - -* **headers**: `object` - - Header object mapping header names to values. - -Returns the response object. - -status ------- - -`res.status(status): this` - -Sets the response status to the given status code. - -**Arguments** - -* **status**: `number | string` - - Response status code to set. - - If the status code is a string it will be converted to a numeric status - code using the [statuses module](https://github.com/jshttp/statuses) first. - -Returns the response object. - -throw ------ - -`res.throw(status, [reason], [options]): void` - -Throws an HTTP exception for the given status, which will be handled by Foxx -to serve the appropriate JSON error response. - -**Arguments** - -* **status**: `number | string` - - Response status code to set. - - If the status code is a string it will be converted to a numeric status code - using the [statuses module](https://github.com/jshttp/statuses) first. - - If the status code is in the 500-range (500-599), its stacktrace will always - be logged as if it were an unhandled exception. - - If development mode is enabled, the error's stacktrace will be logged as a - warning if the status code is in the 400-range (400-499) or as a regular - message otherwise. - -* **reason**: `string` (optional) - - Message for the exception. 
- - If omitted, the status message corresponding to the status code will be - used instead. - -* **options**: `object` (optional) - - An object with any of the following properties: - - * **cause**: `Error` (optional) - - Cause of the exception that will be logged as part of the error's stacktrace - (recursively, if the exception also has a `cause` property and so on). - - * **extra**: `object` (optional) - - Additional properties that will be added to the error response body - generated by Foxx. - - If development mode is enabled, an `exception` property will be added to - this value containing the error message and a `stacktrace` property will - be added containing an array with each line of the error's stacktrace. - -If an error is passed instead of an options object it will be interpreted as -the *cause* option. If no reason was provided the error's `message` will be -used as the reason instead. - -Returns nothing. - -type ----- - -`res.type([type]): string` - -Sets the response content-type to the given type if provided or returns the -previously set content-type. - -**Arguments** - -* **type**: `string` (optional) - - Content-type of the response body. - - Unlike `res.set('content-type', type)` file extensions can be provided as - values and will be translated to the corresponding MIME type (e.g. `json` - becomes `application/json`). - -Returns the content-type of the response body. - -vary ----- - -`res.vary(names): this` - -`res.vary(...names): this` - -This method wraps the `vary` header manipulation method of the -[vary module](https://github.com/jshttp/vary) for the current response. - -The given names will be added to the response's `vary` header if not already present. - -Returns the response object. - -**Examples** - -```js -res.vary('user-agent'); -res.vary('cookie'); -res.vary('cookie'); // duplicates will be ignored - -// -- or -- - -res.vary('user-agent', 'cookie'); - -// -- or -- - -res.vary(['user-agent', 'cookie']); -``` - -write ------ - -`res.write(data): this` - -Appends the given data to the response body. - -**Arguments** - -* **data**: `string | Buffer` - - Data to append. - - If the data is a buffer the response body will be converted to a buffer first. - - If the response body is a buffer the data will be converted to a buffer first. - - If the data is an object it will be converted to a JSON string first. - - If the data is any other non-string value it will be converted to a string first. - -Returns the response object. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/README.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/README.md deleted file mode 100644 index aff4419f9270..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/README.md +++ /dev/null @@ -1,84 +0,0 @@ -Session Middleware -================== - -`const sessionMiddleware = require('@arangodb/foxx/sessions');` - -The session middleware adds the `session` and `sessionStorage` properties to -the [request object](../Routers/Request.md) and deals with serializing and -deserializing the session as well as extracting session identifiers from -incoming requests and injecting them into outgoing responses. 
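
As a minimal sketch (assuming the service has a `sessions` collection set up), the middleware can also be created using the string shortcut for the transport and the `autoCreate` flag described further below, instead of the more explicit configuration shown in the Examples:

```js
const sessionsMiddleware = require('@arangodb/foxx/sessions');

// Sketch only: use the service's "sessions" collection as storage (assumed to
// exist), the plain "cookie" string shortcut as transport, and disable
// auto-creation so req.session stays null for clients without a session.
module.context.use(sessionsMiddleware({
  storage: module.context.collection('sessions'),
  transport: 'cookie',
  autoCreate: false
}));
```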
- -**Examples** - -```js -// Create a session middleware -const sessions = sessionsMiddleware({ - storage: module.context.collection('sessions'), - transport: ['header', 'cookie'] -}); -// First enable the middleware for this service -module.context.use(sessions); -// Now mount the routers that use the session -const router = createRouter(); -module.context.use(router); - -router.get('/', function (req, res) { - res.send(`Hello ${req.session.uid || 'anonymous'}!`); -}, 'hello'); - -router.post('/login', function (req, res) { - req.session.uid = req.body; - req.sessionStorage.save(req.session); - res.redirect(req.reverse('hello')); -}); -.body(['text/plain'], 'Username'); -``` - -Creating a session middleware ------------------------------ - -`sessionMiddleware(options): Middleware` - -Creates a session middleware. - -**Arguments** - -* **options**: `Object` - - An object with the following properties: - - * **storage**: `Storage` - - Storage that will be used to persist the sessions. - - The storage is also exposed as the `sessionStorage` on all request objects - and as the `storage` property of the middleware. - - If a string or collection is passed instead of a Storage, it will be used - to create a [Collection Storage](Storages/Collection.md). - - * **transport**: `Transport | Array` - - Transport or array of transports that will be used to extract the session - identifiers from incoming requests and inject them into outgoing responses. - When attempting to extract a session identifier, the transports will be - used in the order specified until a match is found. When injecting - (or clearing) session identifiers, all transports will be invoked. - - The transports are also exposed as the `transport` property of the middleware. - - If the string `"cookie"` is passed instead of a Transport, the - [Cookie Transport](Transports/Cookie.md) will be used with the default - settings instead. - - If the string `"header"` is passed instead of a Transport, the - [Header Transport](Transports/Header.md) will be used with the default - settings instead. - - * **autoCreate**: `boolean` (Default: `true`) - - If enabled the session storage's `new` method will be invoked to create an - empty session whenever the transport failed to return a session for the - incoming request. Otherwise the session will be initialized as `null`. - -Returns the session middleware. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/Collection.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/Collection.md deleted file mode 100644 index 94bc9fa40f33..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/Collection.md +++ /dev/null @@ -1,89 +0,0 @@ -Collection Session Storage -========================== - -`const collectionStorage = require('@arangodb/foxx/sessions/storages/collection');` - -The collection session storage persists sessions to a collection in the database. - -Creating a storage ------------------- - -`collectionStorage(options): Storage` - -Creates a [Storage](README.md) that can be used in the sessions middleware. - -**Arguments** - -* **options**: `Object` - - An object with the following properties: - - * **collection**: `ArangoCollection` - - The collection that should be used to persist the sessions. - If a string is passed instead of a collection it is assumed to be the fully - qualified name of a collection in the current database. 
- - * **ttl**: `number` (Default: `60 * 60`) - - The time in seconds since the last update until a session will be - considered expired. - - * **pruneExpired**: `boolean` (Default: `false`) - - Whether expired sessions should be removed from the collection when they - are accessed instead of simply being ignored. - - * **autoUpdate**: `boolean` (Default: `true`) - - Whether sessions should be updated in the collection every time they - are accessed to keep them from expiring. Disabling this option - **will improve performance** but means you will have to take care of - keeping your sessions alive yourself. - -If a string or collection is passed instead of an options object, it will -be interpreted as the *collection* option. - -prune ------ - -`storage.prune(): Array` - -Removes all expired sessions from the collection. This method should be called -even if the *pruneExpired* option is enabled to clean up abandoned sessions. - -Returns an array of the keys of all sessions that were removed. - -save ----- - -`storage.save(session): Session` - -Saves (replaces) the given session object in the collection. This method needs -to be invoked explicitly after making changes to the session or the changes -will not be persisted. Assigns a new `_key` to the session if it previously -did not have one. - -**Arguments** - -* **session**: `Session` - - A session object. - -Returns the modified session. - -clear ------ - -`storage.clear(session): boolean` - -Removes the session from the collection. Has no effect if the session was -already removed or has not yet been saved to the collection (i.e. has no `_key`). - -**Arguments** - -* **session**: `Session` - - A session object. - -Returns `true` if the session was removed or `false` if it had no effect. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/JWT.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/JWT.md deleted file mode 100644 index 2cbe098d0b52..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/JWT.md +++ /dev/null @@ -1,72 +0,0 @@ -JWT Session Storage -=================== - -`const jwtStorage = require('@arangodb/foxx/sessions/storages/jwt');` - -The JWT session storage converts sessions to and from -[JSON Web Tokens](https://jwt.io/). - -**Examples** - -```js -// Pass in a secure secret from the Foxx configuration -const secret = module.context.configuration.jwtSecret; -const sessions = sessionsMiddleware({ - storage: jwtStorage(secret), - transport: 'header' -}); -module.context.use(sessions); -``` - -Creating a storage ------------------- - -`jwtStorage(options): Storage` - -Creates a [Storage](README.md) that can be used in the sessions middleware. - -**Note:** while the "none" algorithm (i.e. no signature) is supported this -dummy algorithm provides no security and allows clients to make arbitrary -modifications to the payload and should not be used unless you are certain -you specifically need it. - -**Arguments** - -* **options**: `Object` - - An object with the following properties: - - * **algorithm**: `string` (Default: `"HS512"`) - - The algorithm to use for signing the token. - - Supported values: - - * `"HS256"` (HMAC-SHA256) - * `"HS384"` (HMAC-SHA384) - * `"HS512"` (HMAC-SHA512) - * `"none"` (no signature) - - * **secret**: `string` - - The secret to use for signing the token. - - This field is forbidden when using the "none" algorithm but required otherwise. - - * **ttl**: `number` (Default: `3600`) - - The maximum lifetime of the token in seconds. 
You may want to keep this - short as a new token is generated on every request allowing clients to - refresh tokens automatically. - - * **verify**: `boolean` (Default: `true`) - - If set to `false` the signature will not be verified but still generated - (unless using the "none" algorithm). - - * **maxExp**: `number` (Default: `Infinity`) - - Largest value that will be accepted in an incoming JWT `exp` (expiration) field. - -If a string is passed instead of an options object it will be interpreted -as the *secret* option. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/README.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/README.md deleted file mode 100644 index 735b2cdb10ec..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Storages/README.md +++ /dev/null @@ -1,100 +0,0 @@ -Session Storages -================ - -Session storages are used by the sessions middleware to persist sessions across -requests. Session storages must implement the `fromClient` and `forClient` -methods and can optionally implement the `new` method. - -The built-in session storages generally provide the following attributes: - -* **uid**: `string` (Default: `null`) - - A unique identifier indicating the active user. - -* **created**: `number` (Default: `Date.now()`) - - The numeric timestamp of when the session was created. - -* **data**: `any` (Default: `null`) - - Arbitrary data to persisted in the session. - -new ---- - -`storage.new(): Session` - -Generates a new session object representing an empty session. -The empty session object should not be persisted unless necessary. -The return value will be exposed by the middleware as the `session` property -of the request object if no session identifier was returned by the session -transports and auto-creation is not explicitly disabled in the session middleware. - -**Examples** - -```js -new() { - return { - uid: null, - created: Date.now(), - data: null - }; -} -``` - -fromClient ----------- - -`storage.fromClient(sid): Session | null` - -Resolves or deserializes a session identifier to a session object. - -**Arguments** - -* **sid**: `string` - - Session identifier to resolve or deserialize. - -Returns a session object representing the session with the given session -identifier that will be exposed by the middleware as the `session` property of -the request object. This method will only be called if any of the session transports -returned a session identifier. If the session identifier is invalid or expired, -the method should return a `null` value to indicate no matching session. - -**Examples** - -```js -fromClient(sid) { - return db._collection('sessions').firstExample({_key: sid}); -} -``` - -forClient ---------- - -`storage.forClient(session): string | null` - -Derives a session identifier from the given session object. - -**Arguments** - -* **session**: `Session` - - Session to derive a session identifier from. - -Returns a session identifier for the session represented by the given -session object. This method will be called with the `session` property -of the request object unless that property is empty (e.g. `null`). 
- -**Examples** - -```js -forClient(session) { - if (!session._key) { - const meta = db._collection('sessions').save(session); - return meta._key; - } - db._collection('sessions').replace(session._key, session); - return session._key; -} -``` diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Cookie.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Cookie.md deleted file mode 100644 index 041a86434c79..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Cookie.md +++ /dev/null @@ -1,77 +0,0 @@ -Cookie Session Transport -======================== - -`const cookieTransport = require('@arangodb/foxx/sessions/transports/cookie');` - -The cookie transport stores session identifiers in cookies on the request and -response object. - -**Examples** - -```js -// Pass in a secure secret from the Foxx configuration -const secret = module.context.configuration.cookieSecret; -const sessions = sessionsMiddleware({ - storage: module.context.collection('sessions'), - transport: cookieTransport({ - name: 'FOXXSESSID', - ttl: 60 * 60 * 24 * 7, // one week in seconds - algorithm: 'sha256', - secret: secret - }) -}); -module.context.use(sessions); -``` - -Creating a transport --------------------- - -`cookieTransport([options]): Transport` - -Creates a [Transport](README.md) that can be used in the sessions middleware. - -**Arguments** - -* **options**: `Object` (optional) - - An object with the following properties: - - * **name**: `string` (Default: `"sid"`) - - The name of the cookie. - - * **ttl**: `number` (optional) - - Cookie lifetime in seconds. Note that this does not affect the storage TTL - (i.e. how long the session itself is considered valid), just how long the - cookie should be stored by the client. - - * **algorithm**: `string` (optional) - - The algorithm used to sign and verify the cookie. If no algorithm is - specified, the cookie will not be signed or verified. - See the [cookie method on the response object](../../Routers/Response.md). - - * **secret**: `string` (optional) - - Secret to use for the signed cookie. Will be ignored if no algorithm is provided. - - * **path**: `string` (optional) - - Path for which the cookie should be issued. - - * **domain**: `string` (optional) - - Domain for which the cookie should be issued. - - * **secure**: `boolean` (Default: `false`) - - Whether the cookie should be marked as secure (i.e. HTTPS/SSL-only). - - * **httpOnly**: `boolean` (Default: `false`) - - Whether the cookie should be marked as HTTP-only (rather than also - exposing it to client-side code). - -If a string is passed instead of an options object, it will be interpreted -as the *name* option. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Header.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Header.md deleted file mode 100644 index b416fbbeb598..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/Header.md +++ /dev/null @@ -1,37 +0,0 @@ -Header Session Transport -======================== - -`const headerTransport = require('@arangodb/foxx/sessions/transports/header');` - -The header transport stores session identifiers in headers on the request -and response objects. 
- -**Examples** - -```js -const sessions = sessionsMiddleware({ - storage: module.context.collection('sessions'), - transport: headerTransport('X-FOXXSESSID') -}); -module.context.use(sessions); -``` - -Creating a transport --------------------- - -`headerTransport([options]): Transport` - -Creates a [Transport](README.md) that can be used in the sessions middleware. - -**Arguments** - -* **options**: `Object` (optional) - - An object with the following properties: - - * **name**: `string` (Default: `X-Session-Id`) - - Name of the header that contains the session identifier (not case sensitive). - -If a string is passed instead of an options object, it will be interpreted -as the *name* option. diff --git a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/README.md b/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/README.md deleted file mode 100644 index cf8719359da3..000000000000 --- a/Documentation/Books/Manual/Foxx/Reference/Sessions/Transports/README.md +++ /dev/null @@ -1,84 +0,0 @@ -Session Transports -================== - -Session transports are used by the sessions middleware to store and retrieve -session identifiers in requests and responses. Session transports must -implement the `get` and/or `set` methods and can optionally implement the -`clear` method. - -get ---- - -`transport.get(request): string | null` - -Retrieves a session identifier from a request object. - -If present this method will automatically be invoked for each transport until -a transport returns a session identifier. - -**Arguments** - -* **request**: `Request` - - [Request object](../../Routers/Request.md) to extract a session identifier from. - -Returns the session identifier or `null` if the transport can not find a -session identifier in the request. - -**Examples** - -```js -get(req) { - return req.get('x-session-id') || null; -} -``` - -set ---- - -`transport.set(response, sid): void` - -Attaches a session identifier to a response object. - -If present this method will automatically be invoked at the end of a request -regardless of whether the session was modified or not. - -**Arguments** - -* **response**: `Response` - - [Response object](../../Routers/Response.md) to attach a session identifier to. - -* **sid**: `string` - - Session identifier to attach to the response. - -Returns nothing. - -**Examples** - -```js -set(res) { - res.set('x-session-id', value); -} -``` - -clear ------ - -`transport.clear(response): void` - -Attaches a payload indicating that the session has been cleared to the -response object. This can be used to clear a session cookie when the session -has been destroyed (e.g. during logout). - -If present this method will automatically be invoked instead of `set` when the -`req.session` attribute was removed by the route handler. - -**Arguments** - -* **response**: `Response` - - Response object to remove the session identifier from. - -Returns nothing. diff --git a/Documentation/Books/Manual/GettingStarted/Authentication.md b/Documentation/Books/Manual/GettingStarted/Authentication.md deleted file mode 100644 index da343eba216e..000000000000 --- a/Documentation/Books/Manual/GettingStarted/Authentication.md +++ /dev/null @@ -1,29 +0,0 @@ -Authentication -============== - -ArangoDB allows to restrict access to databases to certain users. All -users of the system database are considered administrators. During -installation a default user *root* is created, which has access to -all databases. 
- -You should create a database for your application together with a -user that has access rights to this database. See -[Managing Users](../Administration/ManagingUsers/README.md). - -Use the *arangosh* to create a new database and user. - -``` -arangosh> db._createDatabase("example"); -arangosh> var users = require("@arangodb/users"); -arangosh> users.save("root@example", "password"); -arangosh> users.grantDatabase("root@example", "example"); -``` - -You can now connect to the new database using the user -*root@example*. - -``` -shell> arangosh --server.username "root@example" --server.database example -``` - - diff --git a/Documentation/Books/Manual/GettingStarted/ComingFromSql.md b/Documentation/Books/Manual/GettingStarted/ComingFromSql.md deleted file mode 100644 index 9cde46ed3a22..000000000000 --- a/Documentation/Books/Manual/GettingStarted/ComingFromSql.md +++ /dev/null @@ -1,142 +0,0 @@ -Coming from SQL -=============== - -If you worked with a relational database management system (RDBMS) such as MySQL, -MariaDB or PostgreSQL, you will be familiar with its query language, a dialect -of SQL (Structured Query Language). - -ArangoDB's query language is called AQL. There are some similarities between both -languages despite the different data models of the database systems. The most -notable difference is probably the concept of loops in AQL, which makes it feel -more like a programming language. It suits the schema-less model more natural -and makes the query language very powerful while remaining easy to read and write. - -To get started with AQL, have a look at our detailed -[comparison of SQL and AQL](https://arangodb.com/why-arangodb/sql-aql-comparison/). -It will also help you to translate SQL queries to AQL when migrating to ArangoDB. - -{% hint 'info' %} -You may also be interested in the white paper -[**Switching from Relational Databases to ArangoDB**](https://www.arangodb.com/white-paper-switching-relational-database/) -on our website! -{% endhint %} - -Basic queries -------------- - -**How do select lists translate to AQL queries?** - -In traditional SQL you may either fetch all columns of a table row by row, using -`SELECT * FROM table`, or select a subset of the columns. The list of table -columns to fetch is commonly called *select list*: - -```sql -SELECT columnA, columnB, columnZ FROM table -``` - -Since documents aren't two-dimensional, and neither do you want to be limited to -returning two-dimensional lists, the requirements for a query language are higher. -AQL is thus a little bit more complex than plain SQL at first, but offers much -more flexibility in the long run. It lets you handle arbitrarily structured -documents in convenient ways, mostly leaned on the syntax used in JavaScript. - -**Composing the documents to be returned** - -The AQL `RETURN` statement returns one item per document it is handed. You can -return the whole document, or just parts of it. 
Given that *oneDocument* is -a document (retrieved like `LET oneDocument = DOCUMENT("myusers/3456789")` -for instance), it can be returned as-is like this: - -```js -RETURN oneDocument -``` - -```json -[ - { - "_id": "myusers/3456789", - "_key": "3456789", - "_rev": "14253647", - "firstName": "John", - "lastName": "Doe", - "address": { - "city": "Gotham", - "street": "Road To Nowhere 1" - }, - "hobbies": [ - { "name": "swimming", "howFavorite": 10 }, - { "name": "biking", "howFavorite": 6 }, - { "name": "programming", "howFavorite": 4 } - ] - } -] -``` - -Return the hobbies sub-structure only: - -```js -RETURN oneDocument.hobbies -``` - -```json -[ - [ - { "name": "swimming", "howFavorite": 10 }, - { "name": "biking", "howFavorite": 6 }, - { "name": "programming", "howFavorite": 4 } - ] -] -``` - -Return the hobbies and the address: - -```js -RETURN { - hobbies: oneDocument.hobbies, - address: oneDocument.address -} -``` - -```json -[ - { - "hobbies": [ - { "name": "swimming", "howFavorite": 10 }, - { "name": "biking", "howFavorite": 6 }, - { "name": "programming", "howFavorite": 4 } - ], - "address": { - "city": "Gotham", - "street": "Road To Nowhere 1" - } - } -] -``` - -Return the first hobby only: - -```js -RETURN oneDocument.hobbies[0].name -``` - -```json -[ - "swimming" -] -``` - -Return a list of all hobby strings: - -```js -RETURN { hobbies: oneDocument.hobbies[*].name } -``` - -```json -[ - { "hobbies": ["swimming", "biking", "porgramming"] } -] -``` - -More complex [array](../../AQL/Functions/Array.html) and -[object manipulations](../../AQL/Functions/Document.html) can be done using -AQL functions and [operators](../../AQL/Operators.html). diff --git a/Documentation/Books/Manual/GettingStarted/DatabasesCollectionsDocuments.md b/Documentation/Books/Manual/GettingStarted/DatabasesCollectionsDocuments.md deleted file mode 100644 index abd8f4abc9bc..000000000000 --- a/Documentation/Books/Manual/GettingStarted/DatabasesCollectionsDocuments.md +++ /dev/null @@ -1,39 +0,0 @@ -Databases, Collections and Documents -==================================== - -Databases are sets of collections. Collections store records, which are referred -to as documents. Collections are the equivalent of tables in RDBMS, and -documents can be thought of as rows in a table. The difference is that you don't -define what columns (or rather attributes) there will be in advance. Every -document in any collection can have arbitrary attribute keys and -values. Documents in a single collection will likely have a similar structure in -practice however, but the database system itself does not impose it and will -operate stable and fast no matter how your data looks like. - -Read more in the [data-model concepts](../DataModeling/Concepts.md) chapter. - -For now, you can stick with the default `_system` database and use the web -interface to create collections and documents. Start by clicking the -*COLLECTIONS* menu entry, then the *Add Collection* tile. Give it a name, e.g. -*users*, leave the other settings unchanged (we want it to be a document -collection) and *Save* it. A new tile labeled *users* should show up, which -you can click to open. - -There will be *No documents* yet. Click the green circle with the white plus -on the right-hand side to create a first document in this collection. A dialog -will ask you for a `_key`. You can leave the field blank and click *Create* to -let the database system assign an automatically generated (unique) key. 
Note -that the `_key` property is immutable, which means you can not change it once -the document is created. What you can use as document key is described in the -[naming conventions](../DataModeling/NamingConventions/DocumentKeys.md). - -An automatically generated key could be `"9883"` (`_key` is always a string!), -and the document `_id` would be `"users/9883"` in that case. Aside from a few -system attributes, there is nothing in this document yet. Let's add a custom -attribute by clicking the icon to the left of *(empty object)*, then *Append*. -Two input fields will become available, *FIELD* (attribute key) and *VALUE* -(attribute value). Type `name` as key and your name as value. *Append* another -attribute, name it `age` and set it to your age. Click *Save* to persist the -changes. If you click on *Collection: users* at the top on the right-hand side -of the ArangoDB logo, the document browser will show the documents in the -*users* collection and you will see the document you just created in the list. diff --git a/Documentation/Books/Manual/GettingStarted/Installation.md b/Documentation/Books/Manual/GettingStarted/Installation.md deleted file mode 100644 index 4ee1b8f587fc..000000000000 --- a/Documentation/Books/Manual/GettingStarted/Installation.md +++ /dev/null @@ -1,46 +0,0 @@ -Installation -============ - -Head to [arangodb.com/download](https://www.arangodb.com/download/), -select your operating system and download ArangoDB. You may also follow -the instructions on how to install with a package manager, if available. - -If you installed a binary package under Linux, the server is -automatically started. - -If you installed ArangoDB using homebrew under macOS, start the -server by running `/usr/local/sbin/arangod`. - -If you installed ArangoDB under Windows as a service, the server is -automatically started. Otherwise, run the `arangod.exe` located in the -installation folder's `bin` directory. You may have to run it as administrator -to grant it write permissions to `C:\Program Files`. - -For more in-depth information on how to install ArangoDB, as well as available -startup parameters, installation in a cluster and so on, see -[Installation](../Installation/README.md) and -[Deployment](../Deployment/README.md). - -{% hint 'info' %} -ArangoDB offers two [**storage engines**](../Architecture/StorageEngines.md): -MMFiles and RocksDB. Choose the one which suits your needs best in the -installation process or on first startup. -{% endhint %} - - -Securing the installation -------------------------- - -The default installation contains one database *_system* and a user -named *root*. - -Debian based packages and the Windows installer will ask for a -password during the installation process. Red-Hat based packages will -set a random password. For all other installation packages you need to -execute - -``` -shell> arango-secure-installation -``` - -This will ask for a root password and sets this password. \ No newline at end of file diff --git a/Documentation/Books/Manual/GettingStarted/NextSteps.md b/Documentation/Books/Manual/GettingStarted/NextSteps.md deleted file mode 100644 index c77ffc8298c0..000000000000 --- a/Documentation/Books/Manual/GettingStarted/NextSteps.md +++ /dev/null @@ -1,16 +0,0 @@ -Next Steps -========== - -There is a lot more to [discover in AQL](../../AQL/index.html) and much more -functionality that ArangoDB offers. Continue reading the other chapters and -experiment with a test database to foster your knowledge. 
- -If you want to write more AQL queries right now, have a look here: - -- [Data Queries](../../AQL/DataQueries.html): data access and modification queries -- [High-level operations](../../AQL/Operations/index.html): detailed descriptions - of `FOR`, `FILTER` and more operations not shown in this introduction -- [Functions](../../AQL/Functions/index.html): a reference of all provided functions - -Visit the [ArangoDB Training Center](https://www.arangodb.com/arangodb-training-center/) -for courses, tutorials and more. \ No newline at end of file diff --git a/Documentation/Books/Manual/GettingStarted/QueryingTheDatabase.md b/Documentation/Books/Manual/GettingStarted/QueryingTheDatabase.md deleted file mode 100644 index 294f4813e4aa..000000000000 --- a/Documentation/Books/Manual/GettingStarted/QueryingTheDatabase.md +++ /dev/null @@ -1,426 +0,0 @@ -Querying the Database -===================== - -Time to retrieve our document using AQL, ArangoDB's query language. We can -directly look up the document we created via the `_id`, but there are also -other options. Click the *QUERIES* menu entry to bring up the query editor -and type the following (adjust the document ID to match your document): - -```js -RETURN DOCUMENT("users/9883") -``` - -Then click *Execute* to run the query. The result appears below the query editor: - -```json -[ - { - "_key": "9883", - "_id": "users/9883", - "_rev": "9883", - "age": 32, - "name": "John Smith" - } -] -``` - -As you can see, the entire document including the system attributes is returned. -[DOCUMENT()](../../AQL/Functions/Miscellaneous.html) is a function to retrieve -a single document or a list of documents of which you know the `_key`s or `_id`s. -We return the result of the function call as our query result, which is our -document inside of the result array (we could have returned more than one result -with a different query, but even for a single document as result, we still get -an array at the top level). - -This type of query is called data access query. No data is created, changed or -deleted. There is another type of query called data modification query. Let's -insert a second document using a modification query: - -```js -INSERT { name: "Katie Foster", age: 27 } INTO users -``` - -The query is pretty self-explanatory: the `INSERT` keyword tells ArangoDB that -we want to insert something. What to insert, a document with two attributes in -this case, follows next. The curly braces `{ }` signify documents, or objects. -When talking about records in a collection, we call them documents. Encoded as -JSON, we call them objects. Objects can also be nested. Here's an example: - -```json -{ - "name": { - "first": "Katie", - "last": "Foster" - } -} -``` - -`INTO` is a mandatory part of every `INSERT` operation and is followed by the -collection name that we want to store the document in. Note that there are no -quote marks around the collection name. - -If you run above query, there will be an empty array as result because we did -not specify what to return using a `RETURN` keyword. It is optional in -modification queries, but mandatory in data access queries. Even with `RETURN`, -the return value can still be an empty array, e.g. if the specified document -was not found. Despite the empty result, the above query still created a new -user document. You can verify this with the document browser. 
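You can also verify it with a small query that counts the documents in the collection (`LENGTH()` accepts a collection name and then returns its document count):

```js
RETURN LENGTH(users)
```

With the document created earlier plus this insert, the result should be `[ 2 ]`.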
- -Let's add another user, but return the newly created document this time: - -```js -INSERT { name: "James Hendrix", age: 69 } INTO users -RETURN NEW -``` - -`NEW` is a pseudo-variable, which refers to the document created by `INSERT`. -The result of the query will look like this: - -```json -[ - { - "_key": "10074", - "_id": "users/10074", - "_rev": "10074", - "age": 69, - "name": "James Hendrix" - } -] -``` - -Now that we have 3 users in our collection, how to retrieve them all with a -single query? The following **does not work**: - -```js -RETURN DOCUMENT("users/9883") -RETURN DOCUMENT("users/9915") -RETURN DOCUMENT("users/10074") -``` - -There can only be a single `RETURN` statement here and a syntax error is raised -if you try to execute it. The `DOCUMENT()` function offers a secondary signature -to specify multiple document handles, so we could do: - -```js -RETURN DOCUMENT( ["users/9883", "users/9915", "users/10074"] ) -``` - -An array with the `_id`s of all 3 documents is passed to the function. Arrays -are denoted by square brackets `[ ]` and their elements are separated by commas. - -But what if we add more users? We would have to change the query to retrieve -the newly added users as well. All we want to say with our query is: "For every -user in the collection users, return the user document". We can formulate this -with a `FOR` loop: - -```js -FOR user IN users - RETURN user -``` - -It expresses to iterate over every document in `users` and to use `user` as -variable name, which we can use to refer to the current user document. It could -also be called `doc`, `u` or `ahuacatlguacamole`, this is up to you. It is -advisable to use a short and self-descriptive name however. - -The loop body tells the system to return the value of the variable `user`, -which is a single user document. All user documents are returned this way: - -```json -[ - { - "_key": "9915", - "_id": "users/9915", - "_rev": "9915", - "age": 27, - "name": "Katie Foster" - }, - { - "_key": "9883", - "_id": "users/9883", - "_rev": "9883", - "age": 32, - "name": "John Smith" - }, - { - "_key": "10074", - "_id": "users/10074", - "_rev": "10074", - "age": 69, - "name": "James Hendrix" - } -] -``` - -You may have noticed that the order of the returned documents is not necessarily -the same as they were inserted. There is no order guaranteed unless you explicitly -sort them. We can add a `SORT` operation very easily: - -```js -FOR user IN users - SORT user._key - RETURN user -``` - -This does still not return the desired result: James (10074) is returned before -John (9883) and Katie (9915). The reason is that the `_key` attribute is a string -in ArangoDB, and not a number. The individual characters of the strings are -compared. `1` is lower than `9` and the result is therefore "correct". If we -wanted to use the numerical value of the `_key` attributes instead, we could -convert the string to a number and use it for sorting. There are some implications -however. We are better off sorting something else. How about the age, in descending -order? - -```js -FOR user IN users - SORT user.age DESC - RETURN user -``` - -The users will be returned in the following order: James (69), John (32), Katie -(27). Instead of `DESC` for descending order, `ASC` can be used for ascending -order. `ASC` is the default though and can be omitted. - -We might want to limit the result set to a subset of users, based on the age -attribute for example. 
Let's return users older than 30 only: - -```js -FOR user IN users - FILTER user.age > 30 - SORT user.age - RETURN user -``` - -This will return John and James (in this order). Katie's age attribute does not -fulfill the criterion (greater than 30), she is only 27 and therefore not part -of the result set. We can make her age to return her user document again, using -a modification query: - -```js -UPDATE "9915" WITH { age: 40 } IN users -RETURN NEW -``` - -`UPDATE` allows to partially edit an existing document. There is also `REPLACE`, -which would remove all attributes (except for `_key` and `_id`, which remain the -same) and only add the specified ones. `UPDATE` on the other hand only replaces -the specified attributes and keeps everything else as-is. - -The `UPDATE` keyword is followed by the document key (or a document / object -with a `_key` attribute) to identify what to modify. The attributes to update -are written as object after the `WITH` keyword. `IN` denotes in which collection -to perform this operation in, just like `INTO` (both keywords are actually -interchangeable here). The full document with the changes applied is returned -if we use the `NEW` pseudo-variable: - -```json -[ - { - "_key": "9915", - "_id": "users/9915", - "_rev": "12864", - "age": 40, - "name": "Katie Foster" - } -] -``` - -If we used `REPLACE` instead, the name attribute would be gone. With `UPDATE`, -the attribute is kept (the same would apply to additional attributes if we had -them). - -Let us run our `FILTER` query again, but only return the user names this time: - -```js -FOR user IN users - FILTER user.age > 30 - SORT user.age - RETURN user.name -``` - -This will return the names of all 3 users: - -```json -[ - "John Smith", - "Katie Foster", - "James Hendrix" -] -``` - -It is called a projection if only a subset of attributes is returned. Another -kind of projection is to change the structure of the results: - -``` -FOR user IN users - RETURN { userName: user.name, age: user.age } -``` - -The query defines the output format for every user document. The user name is -returned as `userName` instead of `name`, the age keeps the attribute key in -this example: - -```json -[ - { - "userName": "James Hendrix", - "age": 69 - }, - { - "userName": "John Smith", - "age": 32 - }, - { - "userName": "Katie Foster", - "age": 40 - } -] -``` - -It is also possible to compute new values: - -```js -FOR user IN users - RETURN CONCAT(user.name, "'s age is ", user.age) -``` - -`CONCAT()` is a function that can join elements together to a string. We use it -here to return a statement for every user. As you can see, the result set does -not always have to be an array of objects: - -```json -[ - "James Hendrix's age is 69", - "John Smith's age is 32", - "Katie Foster's age is 40" -] -``` - -Now let's do something crazy: for every document in the users collection, -iterate over all user documents again and return user pairs, e.g. John and Katie. -We can use a loop inside a loop for this to get the cross product (every possible -combination of all user records, 3 \* 3 = 9). We don't want pairings like *John + -John* however, so let's eliminate them with a filter condition: - -```js -FOR user1 IN users - FOR user2 IN users - FILTER user1 != user2 - RETURN [user1.name, user2.name] -``` - -We get 6 pairings. 
Pairs like *James + John* and *John + James* are basically -redundant, but fair enough: - -```json -[ - [ "James Hendrix", "John Smith" ], - [ "James Hendrix", "Katie Foster" ], - [ "John Smith", "James Hendrix" ], - [ "John Smith", "Katie Foster" ], - [ "Katie Foster", "James Hendrix" ], - [ "Katie Foster", "John Smith" ] -] -``` - -We could calculate the sum of both ages and compute something new this way: - -```js -FOR user1 IN users - FOR user2 IN users - FILTER user1 != user2 - RETURN { - pair: [user1.name, user2.name], - sumOfAges: user1.age + user2.age - } -``` - -We introduce a new attribute `sumOfAges` and add up both ages for the value: - -```json -[ - { - "pair": [ "James Hendrix", "John Smith" ], - "sumOfAges": 101 - }, - { - "pair": [ "James Hendrix", "Katie Foster" ], - "sumOfAges": 109 - }, - { - "pair": [ "John Smith", "James Hendrix" ], - "sumOfAges": 101 - }, - { - "pair": [ "John Smith", "Katie Foster" ], - "sumOfAges": 72 - }, - { - "pair": [ "Katie Foster", "James Hendrix" ], - "sumOfAges": 109 - }, - { - "pair": [ "Katie Foster", "John Smith" ], - "sumOfAges": 72 - } -] -``` - -If we wanted to post-filter on the new attribute to only return pairs with a -sum less than 100, we should define a variable to temporarily store the sum, -so that we can use it in a `FILTER` statement as well as in the `RETURN` -statement: - -```js -FOR user1 IN users - FOR user2 IN users - FILTER user1 != user2 - LET sumOfAges = user1.age + user2.age - FILTER sumOfAges < 100 - RETURN { - pair: [user1.name, user2.name], - sumOfAges: sumOfAges - } -``` - -The `LET` keyword is followed by the designated variable name (`sumOfAges`), -then there's a `=` symbol and the value or an expression to define what value -the variable is supposed to have. We re-use our expression to calculate the -sum here. We then have another `FILTER` to skip the unwanted pairings and -make use of the variable we declared before. We return a projection with an -array of the user names and the calculated age, for which we use the variable -again: - -```json -[ - { - "pair": [ "John Smith", "Katie Foster" ], - "sumOfAges": 72 - }, - { - "pair": [ "Katie Foster", "John Smith" ], - "sumOfAges": 72 - } -] -``` - -Pro tip: when defining objects, if the desired attribute key and the variable -to use for the attribute value are the same, you can use a shorthand notation: -`{ sumOfAges }` instead of `{ sumOfAges: sumOfAges }`. - -Finally, let's delete one of the user documents: - -```js -REMOVE "9883" IN users -``` - -It deletes the user John (`_key: "9883"`). We could also remove documents in a -loop (same goes for `INSERT`, `UPDATE` and `REPLACE`): - -```js -FOR user IN users - FILTER user.age >= 30 - REMOVE user IN users -``` - -The query deletes all users whose age is greater than or equal to 30. diff --git a/Documentation/Books/Manual/GettingStarted/README.md b/Documentation/Books/Manual/GettingStarted/README.md deleted file mode 100644 index 2b841b6db5e2..000000000000 --- a/Documentation/Books/Manual/GettingStarted/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Getting Started -=============== - -This beginner's guide will make you familiar with ArangoDB. 
-We will introduce core concepts and cover how to - -- install and run a local ArangoDB server -- use the web interface to interact with it -- store example data in the database -- query the database to retrieve the data again -- edit and remove existing data diff --git a/Documentation/Books/Manual/GettingStarted/WebInterface.md b/Documentation/Books/Manual/GettingStarted/WebInterface.md deleted file mode 100644 index 046edf2bde2d..000000000000 --- a/Documentation/Books/Manual/GettingStarted/WebInterface.md +++ /dev/null @@ -1,41 +0,0 @@ -Web Interface -============= - -The server itself (_arangod_) speaks HTTP / REST, but you can use the -graphical web interface to keep it simple. There is also -[arangosh](../Programs/Arangosh/README.md), a synchronous shell -for interaction with the server. If you are a developer, you might -prefer the shell over the GUI. It does not provide features like -syntax highlighting however. - -When you start using ArangoDB in your project, you will likely use an official -or community-made driver written in the same language as your project. Drivers -implement a programming interface that should feel natural for that programming -language, and do all the talking to the server. Therefore, you can most certainly -ignore the HTTP API unless you want to write a driver yourself or explicitly -want to use the raw interface. - -To get familiar with the database system you can even put drivers aside and -use the web interface (code name *Aardvark*) for basic interaction. -The web interface will become available shortly after you started `arangod`. -You can access it in your browser at http://localhost:8529 - if not, please -see [Troubleshooting](../Troubleshooting/README.md). - -By default, authentication is enabled. The default user is `root`. -Depending on the installation method used, the installation process either -prompted for the root password or the default root password is empty -(see [Securing the installation](Installation.md#securing-the-installation)). - -![Aardvark Login Form](../Programs/WebInterface/images/loginView.png) - -Next you will be asked which database to use. Every server instance comes with -a `_system` database. Select this database to continue. - -![select database](../Programs/WebInterface/images/selectDBView.png) - -You should then be presented the dashboard with server statistics like this: - -![Aardvark Dashboard Request Statistics](../Programs/WebInterface/images/dashboardView.png) - -For a more detailed description of the interface, see [Web Interface](../Programs/WebInterface/README.md). - diff --git a/Documentation/Books/Manual/Graphs/Edges/README.md b/Documentation/Books/Manual/Graphs/Edges/README.md deleted file mode 100644 index 3f524e28f78f..000000000000 --- a/Documentation/Books/Manual/Graphs/Edges/README.md +++ /dev/null @@ -1,36 +0,0 @@ -Edges, Identifiers, Handles -=========================== - -This is an introduction to ArangoDB's interface for edges. -Edges may be [used in graphs](../README.md). -Here we work with edges from the JavaScript shell *arangosh*. -For other languages see the corresponding language API. - -A graph data model always consists of at least two collections: the relations between the -nodes in the graphs are stored in an "edges collection", the nodes in the graph -are stored in documents in regular collections. - -Edges in ArangoDB are special documents. 
In addition to the system -attributes *_key*, *_id* and *_rev*, they have the attributes *_from* and *_to*, -which contain [document handles](../../Appendix/Glossary.md#document-handle), namely the start-point and the end-point of the edge. - -*Example*: - -- the "edge" collection stores the information that a company's reception is sub-unit to the services unit and the services unit is sub-unit to the - CEO. You would express this relationship with the *_from* and *_to* attributes -- the "normal" collection stores all the properties about the reception, e.g. that 20 people are working there and the room number etc -- *_from* is the [document handle](../../Appendix/Glossary.md#document-handle) of the linked vertex (incoming relation) -- *_to* is the document handle of the linked vertex (outgoing relation) - -[Edge collections](../../Appendix/Glossary.md#edge-collection) are special collections that store edge documents. Edge documents -are connection documents that reference other documents. The type of a collection -must be specified when a collection is created and cannot be changed afterwards. - -To change edge endpoints you would need to remove old document/edge and insert new one. -Other fields can be updated as in default collection. - -Working with Edges ------------------- - -Edges are normal [documents](../../DataModeling/Documents/DocumentMethods.md#edges) -that always contain a `_from` and a `_to` attribute. diff --git a/Documentation/Books/Manual/Graphs/GeneralGraphs/Functions.md b/Documentation/Books/Manual/Graphs/GeneralGraphs/Functions.md deleted file mode 100644 index f4b2a6b74ea5..000000000000 --- a/Documentation/Books/Manual/Graphs/GeneralGraphs/Functions.md +++ /dev/null @@ -1,992 +0,0 @@ -Graph Functions -=============== - -This chapter describes [various functions on a graph](../README.md). -A lot of these accept a vertex (or edge) example as parameter as defined in the next section. - - -Examples will explain the API on the [the city graph](../README.md#the-city-graph): - -![Social Example Graph](../cities_graph.png) - - -Definition of examples ----------------------- - -@startDocuBlock general_graph_example_description - -Get vertices from edges. ------------------------- - -### Get vertex *from* of an edge - - - -Get the source vertex of an edge - -`graph._fromVertex(edgeId)` - -Returns the vertex defined with the attribute *_from* of the edge with *edgeId* as its *_id*. - - -**Parameters** - * edgeId (required) *_id* attribute of the edge - - -**Examples** - - - @startDocuBlockInline generalGraphGetFromVertex - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphGetFromVertex} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - var any = require("@arangodb").db.relation.any(); - graph._fromVertex("relation/" + any._key); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphGetFromVertex - - - -### Get vertex *to* of an edge - - - -Get the target vertex of an edge - -`graph._toVertex(edgeId)` - -Returns the vertex defined with the attribute *_to* of the edge with *edgeId* as its *_id*. 
- - -**Parameters** - - - * edgeId (required) *_id* attribute of the edge - - -**Examples** - - - @startDocuBlockInline generalGraphGetToVertex - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphGetToVertex} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - var any = require("@arangodb").db.relation.any(); - graph._toVertex("relation/" + any._key); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphGetToVertex - - - -_neighbors ----------- - - - -Get all neighbors of the vertices defined by the example - -`graph._neighbors(vertexExample, options)` - -The function accepts an id, an example, a list of examples or even an empty -example as parameter for vertexExample. -The complexity of this method is **O(n\*m^x)** with *n* being the vertices defined by the -parameter vertexExamplex, *m* the average amount of neighbors and *x* the maximal depths. -Hence the default call would have a complexity of **O(n\*m)**; - - -**Parameters** - -- vertexExample (optional) See [Definition of examples](#definition-of-examples) -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default). - - *edgeExamples*: Filter the edges, see [Definition of examples](#definition-of-examples) - - *neighborExamples*: Filter the neighbor vertices, see [Definition of examples](#definition-of-examples) - - *edgeCollectionRestriction* : One or a list of edge-collection names that should be - considered to be on the path. - - *vertexCollectionRestriction* : One or a list of vertex-collection names that should be - considered on the intermediate vertex steps. - - *minDepth*: Defines the minimal number of intermediate steps to neighbors (default is 1). - - *maxDepth*: Defines the maximal number of intermediate steps to neighbors (default is 1). - - -**Examples** - - -A route planner example, all neighbors of capitals. - - @startDocuBlockInline generalGraphModuleNeighbors1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleNeighbors1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._neighbors({isCapital : true}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleNeighbors1 - -A route planner example, all outbound neighbors of Hamburg. - - @startDocuBlockInline generalGraphModuleNeighbors2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleNeighbors2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._neighbors('germanCity/Hamburg', {direction : 'outbound', maxDepth : 2}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleNeighbors2 - - - -_commonNeighbors ----------------- - - - -Get all common neighbors of the vertices defined by the examples. - -`graph._commonNeighbors(vertex1Example, vertex2Examples, optionsVertex1, optionsVertex2)` - -This function returns the intersection of *graph_module._neighbors(vertex1Example, optionsVertex1)* -and *graph_module._neighbors(vertex2Example, optionsVertex2)*. -For parameter documentation see [_neighbors](#neighbors). 
- -The complexity of this method is **O(n\*m^x)** with *n* being the maximal amount of vertices -defined by the parameters vertexExamples, *m* the average amount of neighbors and *x* the -maximal depths. -Hence the default call would have a complexity of **O(n\*m)**; - - -**Examples** - - -A route planner example, all common neighbors of capitals. - - @startDocuBlockInline generalGraphModuleCommonNeighbors1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCommonNeighbors1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._commonNeighbors({isCapital : true}, {isCapital : true}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCommonNeighbors1 - -A route planner example, all common outbound neighbors of Hamburg with any other location -which have a maximal depth of 2 : - - @startDocuBlockInline generalGraphModuleCommonNeighbors2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCommonNeighbors2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - | graph._commonNeighbors( - | 'germanCity/Hamburg', - | {}, - | {direction : 'outbound', maxDepth : 2}, - {direction : 'outbound', maxDepth : 2}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCommonNeighbors2 - - - -_countCommonNeighbors ---------------------- - - - -Get the amount of common neighbors of the vertices defined by the examples. - -`graph._countCommonNeighbors(vertex1Example, vertex2Examples, optionsVertex1, optionsVertex2)` - -Similar to [_commonNeighbors](#commonneighbors) but returns count instead of the elements. - - -**Examples** - - -A route planner example, all common neighbors of capitals. - - @startDocuBlockInline generalGraphModuleCommonNeighborsAmount1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCommonNeighborsAmount1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - var example = { isCapital: true }; - var options = { includeData: true }; - graph._countCommonNeighbors(example, example, options, options); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCommonNeighborsAmount1 - -A route planner example, all common outbound neighbors of Hamburg with any other location -which have a maximal depth of 2 : - - @startDocuBlockInline generalGraphModuleCommonNeighborsAmount2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCommonNeighborsAmount2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - var options = { direction: 'outbound', maxDepth: 2, includeData: true }; - graph._countCommonNeighbors('germanCity/Hamburg', {}, options, options); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCommonNeighborsAmount2 - - - -_commonProperties ------------------ - - - -Get the vertices of the graph that share common properties. - -`graph._commonProperties(vertex1Example, vertex2Examples, options)` - -The function accepts an id, an example, a list of examples or even an empty -example as parameter for vertex1Example and vertex2Example. - -The complexity of this method is **O(n)** with *n* being the maximal amount of vertices -defined by the parameters vertexExamples. 
- - -**Parameters** - - -* vertex1Examples (optional) Filter the set of source vertices, see [Definition of examples](#definition-of-examples) - -- vertex2Examples (optional) Filter the set of vertices compared to, see [Definition of examples](#definition-of-examples) -- options (optional) An object defining further options. Can have the following values: - - *vertex1CollectionRestriction* : One or a list of vertex-collection names that should be - searched for source vertices. - - *vertex2CollectionRestriction* : One or a list of vertex-collection names that should be - searched for compare vertices. - - *ignoreProperties* : One or a list of attribute names of a document that should be ignored. - - -**Examples** - - -A route planner example, all locations with the same properties: - - @startDocuBlockInline generalGraphModuleProperties1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleProperties1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._commonProperties({}, {}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleProperties1 - -A route planner example, all cities which share same properties except for population. - - @startDocuBlockInline generalGraphModuleProperties2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleProperties2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._commonProperties({}, {}, {ignoreProperties: 'population'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleProperties2 - - - -_countCommonProperties ----------------------- - - - -Get the amount of vertices of the graph that share common properties. - -`graph._countCommonProperties(vertex1Example, vertex2Examples, options)` - -Similar to [_commonProperties](#commonproperties) but returns count instead of -the objects. - - -**Examples** - - -A route planner example, all locations with the same properties: - - @startDocuBlockInline generalGraphModuleAmountProperties1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAmountProperties1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._countCommonProperties({}, {}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAmountProperties1 - -A route planner example, all German cities which share same properties except for population. - - @startDocuBlockInline generalGraphModuleAmountProperties2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAmountProperties2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - | graph._countCommonProperties({}, {}, {vertex1CollectionRestriction : 'germanCity', - vertex2CollectionRestriction : 'germanCity' ,ignoreProperties: 'population'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAmountProperties2 - - - -_paths ------- - - - -The _paths function returns all paths of a graph. - -`graph._paths(options)` - -This function determines all available paths in a graph. 
- -The complexity of this method is **O(n\*n\*m)** with *n* being the amount of vertices in -the graph and *m* the average amount of connected edges; - - -**Parameters** - - -- options (optional) An object containing options, see below: - - *direction*: The direction of the edges. Possible values are *any*, - *inbound* and *outbound* (default). - - *followCycles* (optional): If set to *true* the query follows cycles in the graph, - default is false. - - *minLength* (optional): Defines the minimal length a path must - have to be returned (default is 0). - - *maxLength* (optional): Defines the maximal length a path must - have to be returned (default is 10). - - -**Examples** - - -Return all paths of the graph "social": - - @startDocuBlockInline generalGraphModulePaths1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModulePaths1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("social"); - g._paths(); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModulePaths1 - -Return all inbound paths of the graph "social" with a maximal -length of 1 and a minimal length of 2: - - @startDocuBlockInline generalGraphModulePaths2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModulePaths2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("social"); - g._paths({direction : 'inbound', minLength : 1, maxLength : 2}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModulePaths2 - - -_shortestPath -------------- - - - -The _shortestPath function returns all shortest paths of a graph. - -`graph._shortestPath(startVertexExample, endVertexExample, options)` - -This function determines all shortest paths in a graph. -The function accepts an id, an example, a list of examples -or even an empty example as parameter for -start and end vertex. -The length of a path is by default the amount of edges from one start vertex to -an end vertex. The option weight allows the user to define an edge attribute -representing the length. - - -**Parameters** - - -- startVertexExample (optional) An example for the desired start Vertices (see [Definition of examples](#definition-of-examples)). -- endVertexExample (optional) An example for the desired end Vertices (see [Definition of examples](#definition-of-examples)). -- options (optional) An object containing options, see below: - - *direction*: The direction of the edges as a string. - Possible values are *outbound*, *inbound* and *any* (default). - - *edgeCollectionRestriction*: One or multiple edge - collection names. Only edges from these collections will be considered for the path. - - *startVertexCollectionRestriction*: One or multiple vertex - collection names. Only vertices from these collections will be considered as - start vertex of a path. - - *endVertexCollectionRestriction*: One or multiple vertex - collection names. Only vertices from these collections will be considered as - end vertex of a path. - - *weight*: The name of the attribute of - the edges containing the length as a string. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as length. - If no default is supplied the default would be positive Infinity so the path could - not be calculated. 
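As a small sketch of how *weight* and *defaultWeight* interact (assuming the *routeplanner* example graph that is also used in the examples below), edges without a `distance` attribute are counted with a length of 1000 here:

```js
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
// edges lacking the 'distance' attribute contribute 1000 to the path length
g._shortestPath('germanCity/Hamburg', 'frenchCity/Lyon',
                {weight : 'distance', defaultWeight : 1000});
examples.dropGraph("routeplanner");
```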
- - -**Examples** - - -A route planner example, shortest path from all german to all french cities: - - @startDocuBlockInline generalGraphModuleShortestPaths1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleShortestPaths1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("routeplanner"); - | g._shortestPath({}, {}, {weight : 'distance', endVertexCollectionRestriction : 'frenchCity', - startVertexCollectionRestriction : 'germanCity'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleShortestPaths1 - -A route planner example, shortest path from Hamburg and Cologne to Lyon: - - @startDocuBlockInline generalGraphModuleShortestPaths2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleShortestPaths2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("routeplanner"); - | g._shortestPath([{_id: 'germanCity/Cologne'},{_id: 'germanCity/Munich'}], 'frenchCity/Lyon', - {weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleShortestPaths2 - - - -_distanceTo ------------ - - - -The _distanceTo function returns all paths and there distance within a graph. - -`graph._distanceTo(startVertexExample, endVertexExample, options)` - -This function is a wrapper of [graph._shortestPath](#shortestpath). -It does not return the actual path but only the distance between two vertices. - - -**Examples** - - -A route planner example, shortest distance from all german to all french cities: - - @startDocuBlockInline generalGraphModuleDistanceTo1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleDistanceTo1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("routeplanner"); - | g._distanceTo({}, {}, {weight : 'distance', endVertexCollectionRestriction : 'frenchCity', - startVertexCollectionRestriction : 'germanCity'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleDistanceTo1 - -A route planner example, shortest distance from Hamburg and Cologne to Lyon: - - @startDocuBlockInline generalGraphModuleDistanceTo2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleDistanceTo2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var g = examples.loadGraph("routeplanner"); - | g._distanceTo([{_id: 'germanCity/Cologne'},{_id: 'germanCity/Munich'}], 'frenchCity/Lyon', - {weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleDistanceTo2 - - - -_absoluteEccentricity ---------------------- - - - -Get the -[eccentricity](http://en.wikipedia.org/wiki/Distance_%28graph_theory%29) -of the vertices defined by the examples. - -`graph._absoluteEccentricity(vertexExample, options)` - -The function accepts an id, an example, a list of examples or even an empty -example as parameter for vertexExample. - - -**Parameters** - -- vertexExample (optional) Filter the vertices, see [Definition of examples](#definition-of-examples) -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default). - - *edgeCollectionRestriction* : One or a list of edge-collection names that should be - considered to be on the path. 
- - *startVertexCollectionRestriction* : One or a list of vertex-collection names that should be - considered for source vertices. - - *endVertexCollectionRestriction* : One or a list of vertex-collection names that should be - considered for target vertices. - - *weight*: The name of the attribute of the edges containing the weight. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as weight. - If no default is supplied the default would be positive infinity so the path and - hence the eccentricity can not be calculated. - - -**Examples** - - -A route planner example, the absolute eccentricity of all locations. - - @startDocuBlockInline generalGraphModuleAbsEccentricity1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsEccentricity1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteEccentricity({}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsEccentricity1 - -A route planner example, the absolute eccentricity of all locations. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleAbsEccentricity2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsEccentricity2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteEccentricity({}, {weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsEccentricity2 - -A route planner example, the absolute eccentricity of all cities regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleAbsEccentricity3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsEccentricity3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - | graph._absoluteEccentricity({}, {startVertexCollectionRestriction : 'germanCity', - direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsEccentricity3 - - - -_eccentricity -------------- - - - -Get the normalized -[eccentricity](http://en.wikipedia.org/wiki/Distance_%28graph_theory%29) -of the vertices defined by the examples. - -`graph._eccentricity(vertexExample, options)` - -Similar to [_absoluteEccentricity](#absoluteeccentricity) but returns a normalized result. - - -**Examples** - - -A route planner example, the eccentricity of all locations. - - @startDocuBlockInline generalGraphModuleEccentricity2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleEccentricity2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._eccentricity(); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleEccentricity2 - -A route planner example, the weighted eccentricity. 
- - @startDocuBlockInline generalGraphModuleEccentricity3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleEccentricity3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._eccentricity({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleEccentricity3 - - - -_absoluteCloseness ------------------- - - - -Get the -[closeness](http://en.wikipedia.org/wiki/Centrality#Closeness_centrality) -of the vertices defined by the examples. - -`graph._absoluteCloseness(vertexExample, options)` - -The function accepts an id, an example, a list of examples or even an empty -example as parameter for *vertexExample*. - -**Parameters** - -- vertexExample (optional) Filter the vertices, see [Definition of examples](#definition-of-examples) -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default). - - *edgeCollectionRestriction* : One or a list of edge-collection names that should be - considered to be on the path. - - *startVertexCollectionRestriction* : One or a list of vertex-collection names that should be - considered for source vertices. - - *endVertexCollectionRestriction* : One or a list of vertex-collection names that should be - considered for target vertices. - - *weight*: The name of the attribute of the edges containing the weight. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as weight. - If no default is supplied the default would be positive infinity so the path and - hence the closeness can not be calculated. - - -**Examples** - - -A route planner example, the absolute closeness of all locations. - - @startDocuBlockInline generalGraphModuleAbsCloseness1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsCloseness1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteCloseness({}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsCloseness1 - -A route planner example, the absolute closeness of all locations. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleAbsCloseness2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsCloseness2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteCloseness({}, {weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsCloseness2 - -A route planner example, the absolute closeness of all German Cities regarding only -outbound paths. 
- - @startDocuBlockInline generalGraphModuleAbsCloseness3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsCloseness3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - | graph._absoluteCloseness({}, {startVertexCollectionRestriction : 'germanCity', - direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsCloseness3 - - - -_closeness ----------- - - - -Get the normalized -[closeness](http://en.wikipedia.org/wiki/Centrality#Closeness_centrality) -of graphs vertices. - -`graph._closeness(options)` - -Similar to [_absoluteCloseness](#absolutecloseness) but returns a normalized value. - - -**Examples** - - -A route planner example, the normalized closeness of all locations. - - @startDocuBlockInline generalGraphModuleCloseness1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCloseness1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._closeness(); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCloseness1 - -A route planner example, the closeness of all locations. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleCloseness2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCloseness2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._closeness({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCloseness2 - -A route planner example, the closeness of all cities regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleCloseness3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleCloseness3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._closeness({direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleCloseness3 - - - -_absoluteBetweenness --------------------- - - - -Get the -[betweenness](http://en.wikipedia.org/wiki/Betweenness_centrality) -of all vertices in the graph. - -`graph._absoluteBetweenness(vertexExample, options)` - - - - -**Parameters** - -- vertexExample (optional) Filter the vertices, see [Definition of examples](#definition-of-examples) -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default). - - *weight*: The name of the attribute of the edges containing the weight. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as weight. - If no default is supplied the default would be positive infinity so the path and - hence the betweeness can not be calculated. - - -**Examples** - - -A route planner example, the absolute betweenness of all locations. 
- - @startDocuBlockInline generalGraphModuleAbsBetweenness1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsBetweenness1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteBetweenness({}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsBetweenness1 - -A route planner example, the absolute betweenness of all locations. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleAbsBetweenness2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsBetweenness2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteBetweenness({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsBetweenness2 - -A route planner example, the absolute betweenness of all cities regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleAbsBetweenness3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleAbsBetweenness3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._absoluteBetweenness({direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleAbsBetweenness3 - - - -_betweenness ------------- - - - -Get the normalized -[betweenness](http://en.wikipedia.org/wiki/Betweenness_centrality) -of graphs vertices. - -`graph_module._betweenness(options)` - -Similar to [_absoluteBetweeness](#absolutebetweenness) but returns normalized values. - - -**Examples** - - -A route planner example, the betweenness of all locations. - - @startDocuBlockInline generalGraphModuleBetweenness1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleBetweenness1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._betweenness(); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleBetweenness1 - -A route planner example, the betweenness of all locations. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleBetweenness2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleBetweenness2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._betweenness({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleBetweenness2 - -A route planner example, the betweenness of all cities regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleBetweenness3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleBetweenness3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._betweenness({direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleBetweenness3 - - - -_radius -------- - - - -Get the -[radius](http://en.wikipedia.org/wiki/Eccentricity_%28graph_theory%29) -of a graph. - -` - - -**Parameters** - -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. 
Possible values are *outbound*, *inbound* and *any* (default). - - *weight*: The name of the attribute of the edges containing the weight. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as weight. - If no default is supplied the default would be positive infinity so the path and - hence the radius can not be calculated. - - -**Examples** - - -A route planner example, the radius of the graph. - - @startDocuBlockInline generalGraphModuleRadius1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleRadius1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._radius(); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleRadius1 - -A route planner example, the radius of the graph. -This considers the actual distances. - - @startDocuBlockInline generalGraphModuleRadius2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleRadius2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._radius({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleRadius2 - -A route planner example, the radius of the graph regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleRadius3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleRadius3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._radius({direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleRadius3 - - - -_diameter ---------- - - - -Get the -[diameter](http://en.wikipedia.org/wiki/Eccentricity_%28graph_theory%29) -of a graph. - -`graph._diameter(graphName, options)` - - -**Parameters** - -- options (optional) An object defining further options. Can have the following values: - - *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default). - - *weight*: The name of the attribute of the edges containing the weight. - - *defaultWeight*: Only used with the option *weight*. - If an edge does not have the attribute named as defined in option *weight* this default - is used as weight. - If no default is supplied the default would be positive infinity so the path and - hence the radius can not be calculated. - - -**Examples** - - -A route planner example, the diameter of the graph. - - @startDocuBlockInline generalGraphModuleDiameter1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleDiameter1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._diameter(); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleDiameter1 - -A route planner example, the diameter of the graph. -This considers the actual distances. 
- - @startDocuBlockInline generalGraphModuleDiameter2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleDiameter2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._diameter({weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleDiameter2 - -A route planner example, the diameter of the graph regarding only -outbound paths. - - @startDocuBlockInline generalGraphModuleDiameter3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphModuleDiameter3} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("routeplanner"); - graph._diameter({direction : 'outbound', weight : 'distance'}); - ~ examples.dropGraph("routeplanner"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphModuleDiameter3 diff --git a/Documentation/Books/Manual/Graphs/GeneralGraphs/Management.md b/Documentation/Books/Manual/Graphs/GeneralGraphs/Management.md deleted file mode 100644 index bd77e8bd7757..000000000000 --- a/Documentation/Books/Manual/Graphs/GeneralGraphs/Management.md +++ /dev/null @@ -1,853 +0,0 @@ -Graph Management -================ - -This chapter describes the javascript interface for [creating and modifying named graphs](../README.md). -In order to create a non empty graph the functionality to create edge definitions has to be introduced first: - -Edge Definitions ----------------- - -An edge definition is always a directed relation of a graph. Each graph can have arbitrary many relations defined within the edge definitions array. - -### Initialize the list - - - -Create a list of edge definitions to construct a graph. - -`graph_module._edgeDefinitions(relation1, relation2, ..., relationN)` - -The list of edge definitions of a graph can be managed by the graph module itself. -This function is the entry point for the management and will return the correct list. - - -**Parameters** - -* relationX (optional) An object representing a definition of one relation in the graph - -**Examples** - - - @startDocuBlockInline generalGraphEdgeDefinitionsSimple - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeDefinitionsSimple} - var graph_module = require("@arangodb/general-graph"); - directed_relation = graph_module._relation("lives_in", "user", "city"); - undirected_relation = graph_module._relation("knows", "user", "user"); - edgedefinitions = graph_module._edgeDefinitions(directed_relation, undirected_relation); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeDefinitionsSimple - - - -### Extend the list - - - -Extend the list of edge definitions to construct a graph. - -`graph_module._extendEdgeDefinitions(edgeDefinitions, relation1, relation2, ..., relationN)` - -In order to add more edge definitions to the graph before creating -this function can be used to add more definitions to the initial list. - - -**Parameters** - -* edgeDefinitions (required) A list of relation definition objects. 
-* relationX (required) An object representing a definition of one relation in the graph - -**Examples** - - - @startDocuBlockInline generalGraphEdgeDefinitionsExtend - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeDefinitionsExtend} - var graph_module = require("@arangodb/general-graph"); - directed_relation = graph_module._relation("lives_in", "user", "city"); - undirected_relation = graph_module._relation("knows", "user", "user"); - edgedefinitions = graph_module._edgeDefinitions(directed_relation); - edgedefinitions = graph_module._extendEdgeDefinitions(undirected_relation); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeDefinitionsExtend - - - -### Relation - -Define a directed relation. - -`graph_module._relation(relationName, fromVertexCollections, toVertexCollections)` - -The *relationName* defines the name of this relation and references to the underlying edge collection. -The *fromVertexCollections* is an Array of document collections holding the start vertices. -The *toVertexCollections* is an Array of document collections holding the target vertices. -Relations are only allowed in the direction from any collection in *fromVertexCollections* -to any collection in *toVertexCollections*. - - -**Parameters** - -* relationName (required) The name of the edge collection where the edges should be stored. - Will be created if it does not yet exist. -* fromVertexCollections (required) One or a list of collection names. Source vertices for the edges - have to be stored in these collections. Collections will be created if they do not exist. -* toVertexCollections (required) One or a list of collection names. Target vertices for the edges - have to be stored in these collections. Collections will be created if they do not exist. - - -**Examples** - - - @startDocuBlockInline generalGraphRelationDefinitionSave - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphRelationDefinitionSave} - var graph_module = require("@arangodb/general-graph"); - graph_module._relation("has_bought", ["Customer", "Company"], ["Groceries", "Electronics"]); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphRelationDefinitionSave - - @startDocuBlockInline generalGraphRelationDefinitionSingle - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphRelationDefinitionSingle} - var graph_module = require("@arangodb/general-graph"); - graph_module._relation("has_bought", "Customer", "Product"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphRelationDefinitionSingle - - -Create a graph --------------- - -After having introduced edge definitions a graph can be created. - - - -Create a graph - -`graph_module._create(graphName, edgeDefinitions, orphanCollections)` - -The creation of a graph requires the name of the graph and a definition of its edges. - -For every type of edge definition a convenience method exists that can be used to create a graph. -Optionally a list of vertex collections can be added, which are not used in any edge definition. -These collections are referred to as orphan collections within this chapter. -All collections used within the creation process are created if they do not exist. 
- - -**Parameters** - -* graphName (required) Unique identifier of the graph -* edgeDefinitions (optional) List of relation definition objects -* orphanCollections (optional) List of additional vertex collection names - - -**Examples** - - -Create an empty graph, edge definitions can be added at runtime: - - @startDocuBlockInline generalGraphCreateGraphNoData - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraphNoData} - var graph_module = require("@arangodb/general-graph"); - graph = graph_module._create("myGraph"); - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraphNoData - -Create a graph using an edge collection `edges` and a single vertex collection `vertices` - - @startDocuBlockInline generalGraphCreateGraphSingle - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraphSingle} - ~ db._drop("edges"); - ~ db._drop("vertices"); - var graph_module = require("@arangodb/general-graph"); - var edgeDefinitions = [ { collection: "edges", "from": [ "vertices" ], "to" : [ "vertices" ] } ]; - graph = graph_module._create("myGraph", edgeDefinitions); - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraphSingle - -Create a graph with edge definitions and orphan collections: - - @startDocuBlockInline generalGraphCreateGraph2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraph2} - var graph_module = require("@arangodb/general-graph"); - | graph = graph_module._create("myGraph", - [graph_module._relation("myRelation", ["male", "female"], ["male", "female"])], ["sessions"]); - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraph2 - - - -### Complete Example to create a graph - -Example Call: - - - - @startDocuBlockInline general_graph_create_graph_example1 - @EXAMPLE_ARANGOSH_OUTPUT{general_graph_create_graph_example1} - var graph_module = require("@arangodb/general-graph"); - var edgeDefinitions = graph_module._edgeDefinitions(); - graph_module._extendEdgeDefinitions(edgeDefinitions, graph_module._relation("friend_of", "Customer", "Customer")); - | graph_module._extendEdgeDefinitions( - | edgeDefinitions, graph_module._relation( - "has_bought", ["Customer", "Company"], ["Groceries", "Electronics"])); - graph_module._create("myStore", edgeDefinitions); - ~ graph_module._drop("myStore"); - ~ db._drop("Electronics"); - ~ db._drop("Customer"); - ~ db._drop("Groceries"); - ~ db._drop("Company"); - ~ db._drop("has_bought"); - ~ db._drop("friend_of"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph_create_graph_example1 - - - -alternative call: - - - - @startDocuBlockInline general_graph_create_graph_example2 - @EXAMPLE_ARANGOSH_OUTPUT{general_graph_create_graph_example2} - var graph_module = require("@arangodb/general-graph"); - | var edgeDefinitions = graph_module._edgeDefinitions( - | graph_module._relation("friend_of", ["Customer"], ["Customer"]), graph_module._relation( - "has_bought", ["Customer", "Company"], ["Groceries", "Electronics"])); - graph_module._create("myStore", edgeDefinitions); - ~ graph_module._drop("myStore"); - ~ db._drop("Electronics"); - ~ db._drop("Customer"); - ~ db._drop("Groceries"); - ~ db._drop("Company"); - ~ db._drop("has_bought"); - ~ db._drop("friend_of"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph_create_graph_example2 - - - -List available graphs ---------------------- - - - -List all graphs. - -`graph_module._list()` - -Lists all graph names stored in this database. 
- - -**Examples** - - - @startDocuBlockInline generalGraphList - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphList} - var graph_module = require("@arangodb/general-graph"); - graph_module._list(); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphList - - -Load a graph ------------- - - - -Get a graph - -`graph_module._graph(graphName)` - -A graph can be retrieved by its name. - - -**Parameters** - -* graphName (required) Unique identifier of the graph - - -**Examples** - - -Get a graph: - - @startDocuBlockInline generalGraphLoadGraph - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphLoadGraph} - ~ var examples = require("@arangodb/graph-examples/example-graph.js"); - ~ var g1 = examples.loadGraph("social"); - var graph_module = require("@arangodb/general-graph"); - graph = graph_module._graph("social"); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphLoadGraph - - - -Remove a graph --------------- - - -Remove a graph - -`graph_module._drop(graphName, dropCollections)` - -A graph can be dropped by its name. -This can drop all collections contained in the graph as long as they are not used within other graphs. -To drop the collections only belonging to this graph, the optional parameter *drop-collections* has to be set to *true*. - -**Parameters** - -* graphName (required) Unique identifier of the graph -* dropCollections (optional) Define if collections should be dropped (default: false) - -**Examples** - - -Drop a graph and keep collections: - - @startDocuBlockInline generalGraphDropGraphKeep - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphDropGraphKeep} - ~ var examples = require("@arangodb/graph-examples/example-graph.js"); - ~ var g1 = examples.loadGraph("social"); - var graph_module = require("@arangodb/general-graph"); - graph_module._drop("social"); - db._collection("female"); - db._collection("male"); - db._collection("relation"); - ~ db._drop("female"); - ~ db._drop("male"); - ~ db._drop("relation"); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphDropGraphKeep - - @startDocuBlockInline generalGraphDropGraphDropCollections - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphDropGraphDropCollections} - ~ var examples = require("@arangodb/graph-examples/example-graph.js"); - ~ var g1 = examples.loadGraph("social"); - var graph_module = require("@arangodb/general-graph"); - graph_module._drop("social", true); - db._collection("female"); - db._collection("male"); - db._collection("relation"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphDropGraphDropCollections - - - -Modify a graph definition at runtime ------------------------------------- - -After you have created an graph its definition is not immutable. -You can still add, delete or modify edge definitions and vertex collections. - -### Extend the edge definitions - - - -Add another edge definition to the graph - -`graph._extendEdgeDefinitions(edgeDefinition)` - -Extends the edge definitions of a graph. If an orphan collection is used in this -edge definition, it will be removed from the orphanage. If the edge collection of -the edge definition to add is already used in the graph or used in a different -graph with different *from* and/or *to* collections an error is thrown. 
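For illustration, here is a minimal arangosh sketch of this behavior; the graph and collection names (`orphanDemo`, `people`, `companies`, `projects`) are made up for this example and are not part of the official examples:

```js
var graph_module = require("@arangodb/general-graph");

// create a graph with one relation and one orphan vertex collection
// (assumes no graph named "orphanDemo" exists yet)
var rel = graph_module._relation("worksIn", ["people"], ["companies"]);
var graph = graph_module._create("orphanDemo", [rel], ["projects"]);
graph._orphanCollections();   // [ "projects" ]

// extending the graph with a relation that uses the orphan collection
// removes it from the orphanage
graph._extendEdgeDefinitions(graph_module._relation("assignedTo", ["people"], ["projects"]));
graph._orphanCollections();   // [ ]

// clean up the graph and its collections again
graph_module._drop("orphanDemo", true);
```
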
- - -**Parameters** - -* edgeDefinition (required) The relation definition to extend the graph - - -**Examples** - - - @startDocuBlockInline general_graph__extendEdgeDefinitions - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__extendEdgeDefinitions} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); - var graph = graph_module._create("myGraph", [ed1]); - graph._extendEdgeDefinitions(ed2); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__extendEdgeDefinitions - - - -### Modify an edge definition - - - -Modify an relation definition - -`graph_module._editEdgeDefinitions(edgeDefinition)` - -Edits one relation definition of a graph. The edge definition used as argument will -replace the existing edge definition of the graph which has the same collection. -Vertex Collections of the replaced edge definition that are not used in the new -definition will transform to an orphan. Orphans that are used in this new edge -definition will be deleted from the list of orphans. Other graphs with the same edge -definition will be modified, too. - - -**Parameters** - -* edgeDefinition (required) The edge definition to replace the existing edge - definition with the same attribute *collection*. - - -**Examples** - - - @startDocuBlockInline general_graph__editEdgeDefinition - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__editEdgeDefinition} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var original = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var modified = graph_module._relation("myEC1", ["myVC2"], ["myVC3"]); - var graph = graph_module._create("myGraph", [original]); - graph._editEdgeDefinitions(modified); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__editEdgeDefinition - - - -### Delete an edge definition - - - -Delete one relation definition - -`graph_module._deleteEdgeDefinition(edgeCollectionName, dropCollection)` - -Deletes a relation definition defined by the edge collection of a graph. If the -collections defined in the edge definition (collection, from, to) are not used -in another edge definition of the graph, they will be moved to the orphanage. - - -**Parameters** - -* edgeCollectionName (required) Name of edge collection in the relation definition. -* dropCollection (optional) Define if the edge collection should be dropped. Default false. 
- -**Examples** - - -Remove an edge definition but keep the edge collection: - - @startDocuBlockInline general_graph__deleteEdgeDefinitionNoDrop - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__deleteEdgeDefinitionNoDrop} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); - var graph = graph_module._create("myGraph", [ed1, ed2]); - graph._deleteEdgeDefinition("myEC1"); - db._collection("myEC1"); - ~ db._drop("myEC1"); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__deleteEdgeDefinitionNoDrop - -Remove an edge definition and drop the edge collection: - - @startDocuBlockInline general_graph__deleteEdgeDefinitionWithDrop - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__deleteEdgeDefinitionWithDrop} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); - var graph = graph_module._create("myGraph", [ed1, ed2]); - graph._deleteEdgeDefinition("myEC1", true); - db._collection("myEC1"); - ~ db._drop("myEC1"); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__deleteEdgeDefinitionWithDrop - - - -### Extend vertex Collections - -Each graph can have an arbitrary amount of vertex collections, which are not part of any edge definition of the graph. -These collections are called orphan collections. -If the graph is extended with an edge definition using one of the orphans, -it will be removed from the set of orphan collection automatically. - -#### Add a vertex collection - - - -Add a vertex collection to the graph - -`graph._addVertexCollection(vertexCollectionName, createCollection)` - -Adds a vertex collection to the set of orphan collections of the graph. If the -collection does not exist, it will be created. If it is already used by any edge -definition of the graph, an error will be thrown. - - -**Parameters** - -* vertexCollectionName (required) Name of vertex collection. -* createCollection (optional) If true the collection will be created if it does not exist. Default: true. - -**Examples** - - - @startDocuBlockInline general_graph__addVertexCollection - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__addVertexCollection} - var graph_module = require("@arangodb/general-graph"); - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var graph = graph_module._create("myGraph", [ed1]); - graph._addVertexCollection("myVC3", true); - ~ db._drop("myVC3"); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__addVertexCollection - - - -#### Get the orphaned collections - - - -Get all orphan collections - -`graph._orphanCollections()` - -Returns all vertex collections of the graph that are not used in any edge definition. 
- - -**Examples** - - - @startDocuBlockInline general_graph__orphanCollections - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__orphanCollections} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var graph = graph_module._create("myGraph", [ed1]); - graph._addVertexCollection("myVC3", true); - graph._orphanCollections(); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__orphanCollections - - - -#### Remove a vertex collection - - - -Remove a vertex collection from the graph - -`graph._removeVertexCollection(vertexCollectionName, dropCollection)` - -Removes a vertex collection from the graph. -Only collections not used in any relation definition can be removed. -Optionally the collection can be deleted, if it is not used in any other graph. - - -**Parameters** - -* vertexCollectionName (required) Name of vertex collection. -* dropCollection (optional) If true the collection will be dropped if it is - not used in any other graph. Default: false. - -**Examples** - - - @startDocuBlockInline general_graph__removeVertexCollections - @EXAMPLE_ARANGOSH_OUTPUT{general_graph__removeVertexCollections} - var graph_module = require("@arangodb/general-graph") - ~ if (graph_module._exists("myGraph")){var blub = graph_module._drop("myGraph", true);} - var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); - var graph = graph_module._create("myGraph", [ed1]); - graph._addVertexCollection("myVC3", true); - graph._addVertexCollection("myVC4", true); - graph._orphanCollections(); - graph._removeVertexCollection("myVC3"); - graph._orphanCollections(); - ~ db._drop("myVC3"); - ~ var blub = graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock general_graph__removeVertexCollections - - - - -Manipulating Vertices ---------------------- - -### Save a vertex - - - -Create a new vertex in vertexCollectionName - -`graph.vertexCollectionName.save(data)` - - -**Parameters** - -* data (required) JSON data of vertex. - - -**Examples** - - - @startDocuBlockInline generalGraphVertexCollectionSave - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphVertexCollectionSave} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.male.save({name: "Floyd", _key: "floyd"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphVertexCollectionSave - - - -### Replace a vertex - - - -Replaces the data of a vertex in collection vertexCollectionName - -`graph.vertexCollectionName.replace(vertexId, data, options)` - - -**Parameters** - -* vertexId (required) *_id* attribute of the vertex -* data (required) JSON data of vertex. 
-* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - - -**Examples** - - - @startDocuBlockInline generalGraphVertexCollectionReplace - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphVertexCollectionReplace} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.male.save({neym: "Jon", _key: "john"}); - graph.male.replace("male/john", {name: "John"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphVertexCollectionReplace - - - -### Update a vertex - - - -Updates the data of a vertex in collection vertexCollectionName - -`graph.vertexCollectionName.update(vertexId, data, options)` - - -**Parameters** - -* vertexId (required) *_id* attribute of the vertex -* data (required) JSON data of vertex. -* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - -**Examples** - - - @startDocuBlockInline generalGraphVertexCollectionUpdate - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphVertexCollectionUpdate} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.female.save({name: "Lynda", _key: "linda"}); - graph.female.update("female/linda", {name: "Linda", _key: "linda"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphVertexCollectionUpdate - - - -### Remove a vertex - - - -Removes a vertex in collection *vertexCollectionName* - -`graph.vertexCollectionName.remove(vertexId, options)` - -Additionally removes all ingoing and outgoing edges of the vertex recursively -(see [edge remove](#remove-an-edge)). - - -**Parameters** - -* vertexId (required) *_id* attribute of the vertex -* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - - -**Examples** - - - @startDocuBlockInline generalGraphVertexCollectionRemove - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphVertexCollectionRemove} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.male.save({name: "Kermit", _key: "kermit"}); - db._exists("male/kermit") - graph.male.remove("male/kermit") - db._exists("male/kermit") - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphVertexCollectionRemove - - - -Manipulating Edges ------------------- - -### Save a new edge - - - -Creates an edge from vertex *from* to vertex *to* in collection edgeCollectionName - -`graph.edgeCollectionName.save(from, to, data, options)` - - -**Parameters** - -* from (required) *_id* attribute of the source vertex -* to (required) *_id* attribute of the target vertex -* data (required) JSON data of the edge -* options (optional) See [collection documentation](../Edges/README.md) - - -**Examples** - - - @startDocuBlockInline generalGraphEdgeCollectionSave1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeCollectionSave1} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.relation.save("male/bob", "female/alice", {type: "married", _key: "bobAndAlice"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeCollectionSave1 - -If the collections of *from* and *to* are not defined in an edge definition of the graph, -the edge will not be stored. 
- - @startDocuBlockInline generalGraphEdgeCollectionSave2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeCollectionSave2} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - | graph.relation.save( - | "relation/aliceAndBob", - | "female/alice", - {type: "married", _key: "bobAndAlice"}); // xpError(ERROR_GRAPH_INVALID_EDGE) - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeCollectionSave2 - - -### Replace an edge - - - -Replaces the data of an edge in collection edgeCollectionName. Note that `_from` and `_to` are mandatory. - -`graph.edgeCollectionName.replace(edgeId, data, options)` - - -**Parameters** - -* edgeId (required) *_id* attribute of the edge -* data (required) JSON data of the edge -* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - - -**Examples** - - - @startDocuBlockInline generalGraphEdgeCollectionReplace - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeCollectionReplace} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.relation.save("female/alice", "female/diana", {typo: "nose", _key: "aliceAndDiana"}); - graph.relation.replace("relation/aliceAndDiana", {type: "knows", _from: "female/alice", _to: "female/diana"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeCollectionReplace - - - -### Update an edge - - - -Updates the data of an edge in collection edgeCollectionName - -`graph.edgeCollectionName.update(edgeId, data, options)` - - -**Parameters** - -* edgeId (required) *_id* attribute of the edge -* data (required) JSON data of the edge -* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - - -**Examples** - - - @startDocuBlockInline generalGraphEdgeCollectionUpdate - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeCollectionUpdate} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.relation.save("female/alice", "female/diana", {type: "knows", _key: "aliceAndDiana"}); - graph.relation.update("relation/aliceAndDiana", {type: "quarreled", _key: "aliceAndDiana"}); - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeCollectionUpdate - - - -### Remove an edge - - - -Removes an edge in collection edgeCollectionName - -`graph.edgeCollectionName.remove(edgeId, options)` - -If this edge is used as a vertex by another edge, the other edge will be removed (recursively). 
- - -**Parameters** - -* edgeId (required) *_id* attribute of the edge -* options (optional) See [collection documentation](../../DataModeling/Documents/DocumentMethods.md) - - -**Examples** - - - @startDocuBlockInline generalGraphEdgeCollectionRemove - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdgeCollectionRemove} - var examples = require("@arangodb/graph-examples/example-graph.js"); - var graph = examples.loadGraph("social"); - graph.relation.save("female/alice", "female/diana", {_key: "aliceAndDiana"}); - db._exists("relation/aliceAndDiana") - graph.relation.remove("relation/aliceAndDiana") - db._exists("relation/aliceAndDiana") - ~ examples.dropGraph("social"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphEdgeCollectionRemove diff --git a/Documentation/Books/Manual/Graphs/GeneralGraphs/README.md b/Documentation/Books/Manual/Graphs/GeneralGraphs/README.md deleted file mode 100644 index f7fb36aa3035..000000000000 --- a/Documentation/Books/Manual/Graphs/GeneralGraphs/README.md +++ /dev/null @@ -1,55 +0,0 @@ -Graphs -====== - -This chapter describes the [general-graph](../README.md) module. -It allows you to define a graph that is spread across several edge and document collections. -This allows you to structure your models in line with your domain and group them logically in collections giving you the power to query them in the same graph queries. -There is no need to include the referenced collections within the query, this module will handle it for you. - -New to ArangoDB? Take the free -[ArangoDB Graph Course](https://www.arangodb.com/arangodb-graph-course) -for freshers. - -Three Steps to create a graph ------------------------------ - -* Create a graph - - @startDocuBlockInline generalGraphCreateGraphHowTo1 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraphHowTo1} - var graph_module = require("@arangodb/general-graph"); - var graph = graph_module._create("myGraph"); - graph; - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraphHowTo1 - -* Add some vertex collections - - @startDocuBlockInline generalGraphCreateGraphHowTo2 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraphHowTo2} - ~ var graph_module = require("@arangodb/general-graph"); - ~ var graph = graph_module._create("myGraph"); - graph._addVertexCollection("shop"); - graph._addVertexCollection("customer"); - graph._addVertexCollection("pet"); - graph; - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraphHowTo2 - -* Define relations on the Graph - - @startDocuBlockInline generalGraphCreateGraphHowTo3 - @EXAMPLE_ARANGOSH_OUTPUT{generalGraphCreateGraphHowTo3} - ~ var graph_module = require("@arangodb/general-graph"); - ~ var graph = graph_module._create("myGraph"); - ~ graph._addVertexCollection("pet"); - var rel = graph_module._relation("isCustomer", ["shop"], ["customer"]); - graph._extendEdgeDefinitions(rel); - graph; - ~ graph_module._drop("myGraph", true); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock generalGraphCreateGraphHowTo3 - - diff --git a/Documentation/Books/Manual/Graphs/Pregel/README.md b/Documentation/Books/Manual/Graphs/Pregel/README.md deleted file mode 100644 index b899b0e5ba24..000000000000 --- a/Documentation/Books/Manual/Graphs/Pregel/README.md +++ /dev/null @@ -1,378 +0,0 @@ -Distributed Iterative Graph Processing (Pregel) -=============================================== - -Distributed graph processing enables you to do online analytical processing -directly on graphs stored into 
ArangoDB. This is intended to help you gain analytical insights
-on your data, without having to use external processing systems. Examples of algorithms
-to execute are PageRank, Vertex Centrality, Vertex Closeness, Connected Components, Community Detection.
-This system is not useful for typical online queries, where you just do work on a small set of vertices.
-These kinds of tasks are better suited for AQL.
-
-Check out the hands-on
-[ArangoDB Pregel Tutorial](https://www.arangodb.com/pregel-community-detection/)
-to learn more.
-
-The processing system inside ArangoDB is based on:
-[Pregel: A System for Large-Scale Graph Processing](http://www.dcs.bbk.ac.uk/~dell/teaching/cc/paper/sigmod10/p135-malewicz.pdf) – Malewicz et al. (Google), 2010.
-This concept enables us to perform distributed graph processing, without the need for distributed global locking.
-
-Prerequisites
--------------
-
-If you are running a single ArangoDB instance in single-server mode, there are no requirements regarding the modeling
-of your data. All you need is at least one vertex collection and one edge collection. Note that the performance may be
-better if the number of your shards / collections matches the number of CPU cores.
-
-When you use ArangoDB Community Edition in cluster mode, you might need to model your collections in a certain way to
-ensure correct results. For more information see the next section.
-
-### Requirements for Collections in a Cluster (Non Smart Graph)
-
-To enable iterative graph processing for your data, you will need to ensure
-that your vertex and edge collections are sharded in a specific way.
-
-The Pregel computing model requires all edges to be present on the DB Server where
-the vertex document identified by the `_from` value is located.
-This means the vertex collections need to be sharded by '_key' and the edge collection
-will need to be sharded by an attribute which always contains the '_key' of the vertex.
-
-Our implementation currently requires every edge collection to be sharded by a "vertex" attribute.
-Additionally, you will need to specify the key `distributeShardsLike` and an **equal** number of shards on every collection.
-Only if these requirements are met can ArangoDB place the edges and vertices correctly.
-
-For example you might create your collections like this:
-
-```javascript
-// Create main vertex collection:
-db._create("vertices", {
-  shardKeys: ['_key'],
-  numberOfShards: 8
-});
-
-// Optionally create arbitrary additional vertex collections
-db._create("additional", {
-  distributeShardsLike: "vertices",
-  numberOfShards: 8
-});
-
-// Create (one or more) edge collections:
-db._createEdgeCollection("edges", {
-  shardKeys: ['vertex'],
-  distributeShardsLike: "vertices",
-  numberOfShards: 8
-});
-```
-
-You will need to ensure that edge documents contain the proper values in their sharding attribute.
-For a vertex document with the following content `{ _key: "A", value: 0 }`
-the corresponding edge documents would look like this:
-
-```js
-{ "_from":"vertices/A", "_to": "vertices/B", "vertex": "A" }
-{ "_from":"vertices/A", "_to": "vertices/C", "vertex": "A" }
-{ "_from":"vertices/A", "_to": "vertices/D", "vertex": "A" }
-...
-```
-
-This ensures that outgoing edge documents are placed on the same DB Server as their source vertex.
-Without the correct placement of the edges, the Pregel graph processing system will not work correctly, because
-edges will not be loaded correctly.
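One way to keep the sharding attribute consistent is to derive it from the `_from` value whenever an edge is inserted. The following arangosh sketch illustrates the idea; the `saveEdge` helper is purely illustrative (not part of any ArangoDB API) and assumes the `vertices` and `edges` collections created above:

```js
// store an edge and set the shard attribute to the _key of the source vertex
function saveEdge(from, to) {
  var vertexKey = from.split("/")[1];   // "vertices/A" -> "A"
  return db.edges.save({ _from: from, _to: to, vertex: vertexKey });
}

db.vertices.save({ _key: "A", value: 0 });
db.vertices.save({ _key: "B", value: 0 });
saveEdge("vertices/A", "vertices/B");   // stored with { "vertex": "A" }
```
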
-
-Arangosh API
-------------
-
-### Starting an Algorithm Execution
-
-The Pregel API is accessible through the `@arangodb/pregel` package.
-To start an execution you need to specify the **algorithm** name and the vertex and edge collections.
-Alternatively you can specify a named graph. Additionally, you can specify custom parameters which
-vary for each algorithm.
-The `start` method will always return a unique ID which can be used to interact with the algorithm later on.
-
-The following version of the `start` method can be used for named graphs:
-```javascript
-var pregel = require("@arangodb/pregel");
-var params = {};
-var execution = pregel.start("<algorithm>", "<yourgraph>", params);
-```
-`params` needs to be an object; the valid keys are mentioned below in the section
-[Available Algorithms](#available-algorithms).
-
-Alternatively you might want to specify the vertex and edge collections directly. The call syntax of the `start`
-method changes in this case. The second argument must be an object with the keys `vertexCollections` and `edgeCollections`.
-
-```javascript
-var pregel = require("@arangodb/pregel");
-var params = {};
-var execution = pregel.start("<algorithm>", {vertexCollections:["vertices"], edgeCollections:["edges"]}, {});
-```
-The last argument is still the parameter object. See below for a list of algorithms and parameters.
-
-### Status of an Algorithm Execution
-
-The ID returned by the `pregel.start(...)` method can be used to
-track the status of your algorithm.
-
-```javascript
-var execution = pregel.start("sssp", "demograph", {source: "vertices/V"});
-var status = pregel.status(execution);
-```
-
-The result will tell you the current status of the algorithm execution:
-the current `state` of the execution, the current global superstep, the runtime, the global aggregator values, as
-well as the number of sent and received messages.
-
-Valid values for the `state` field include:
-- "running": The algorithm is still running.
-- "done": The execution is done, the result might not be written back into the collection yet.
-- "canceled": The execution was permanently canceled, either by the user or by an error.
-- "in error": The execution is in an error state. This can be caused by primary DBServers not being reachable or not responding.
-  The execution might recover later, or switch to "canceled" if it was not able to recover successfully.
-- "recovering": The execution is actively recovering and will switch back to "running" if the recovery was successful.
-
-The object returned by the `status` method might for example look something like this:
-
-```javascript
-{
-  "state" : "running",
-  "gss" : 12,
-  "totalRuntime" : 123.23,
-  "aggregators" : {
-    "converged" : false,
-    "max" : true,
-    "phase" : 2
-  },
-  "sendCount" : 3240364978,
-  "receivedCount" : 3240364975
-}
-```
-
-### Canceling an Execution / Discarding results
-
-To cancel an execution which is still running and discard any intermediate results, you can use the `cancel` method.
-This will immediately free all memory taken up by the execution, and will make you lose all intermediary data.
-
-You might get inconsistent results if you cancel an execution while it is already in its `done` state. The data is written
-multi-threaded into all collection shards at once, which means there are multiple transactions running simultaneously. A transaction might
-already be committed when you cancel the execution job, therefore you might see the result in your collection.
This does not apply
-if you configured the execution to not write data into the collection.
-
-```javascript
-// start a single source shortest path job
-var execution = pregel.start("sssp", "demograph", {source: "vertices/V"});
-pregel.cancel(execution);
-```
-
-AQL integration
----------------
-
-ArangoDB supports retrieving temporary Pregel results through the ArangoDB query language (AQL).
-When our graph processing subsystem finishes executing an algorithm, the result can either be written back into the
-database or kept in memory. In both cases the result can be queried via AQL. If the data was not written to the database,
-it is only held temporarily until the user calls the `cancel` method. For example, a user might want to query
-only the highest-ranked vertices from the result set of a PageRank execution.
-
-```js
-FOR v IN PREGEL_RESULT(<handle>)
-  FILTER v.value >= 0.01
-  RETURN v._key
-```
-
-Available Algorithms
---------------------
-
-There are a number of general parameters which apply to almost all algorithms:
-* `store`: Defaults to *true*; the Pregel engine will write results back to the database.
-  If the value is *false* then you can query the results via AQL,
-  see [AQL integration](#aql-integration).
-* `maxGSS`: Maximum number of global iterations for this algorithm.
-* `parallelism`: Number of parallel threads to use per worker. Does not influence the number of threads used to load
-  or store data from the database (this depends on the number of shards).
-* `async`: Algorithms which support async mode will run without synchronized global iterations.
-  This might lead to performance increases if you have load imbalances.
-* `resultField`: Most algorithms will write the result into this field.
-
-### Page Rank
-
-PageRank is a well-known algorithm to rank documents in a graph. The algorithm will run until
-the execution converges. Specify a custom threshold with the parameter `threshold`; to run for a fixed
-number of iterations, use the `maxGSS` parameter.
-
-```javascript
-var pregel = require("@arangodb/pregel");
-pregel.start("pagerank", "graphname", {maxGSS: 100, threshold:0.00000001, resultField:'rank'})
-```
-
-#### Seeded PageRank
-
-It is possible to specify an initial distribution for the vertex documents in your graph. To define these
-seed ranks / centralities you can specify a `sourceField` in the properties for this algorithm.
-If the specified field is set on a document _and_ the value is numeric, then it will be
-used instead of the default initial rank of `1 / numVertices`.
-
-```javascript
-var pregel = require("@arangodb/pregel");
-pregel.start("pagerank", "graphname", {maxGSS: 20, threshold:0.00000001, sourceField:'seed', resultField:'rank'})
-```
-
-### Single-Source Shortest Path
-
-Calculates the distance of each vertex to a given source vertex. The algorithm will run until it converges;
-the number of iterations is bounded by the diameter (the longest shortest path) of your graph.
-
-```javascript
-var pregel = require("@arangodb/pregel");
-pregel.start("sssp", "graphname", {source:"vertices/1337"})
-```
-
-### Connected Components
-
-There are two algorithms to find connected components in a graph. To find weakly connected components (WCC)
-you can use the algorithm named "connectedcomponents"; to find strongly connected components (SCC) you can use the algorithm
-named "scc". Both algorithms will assign a component ID to each vertex.
-
-A weakly connected component means that there exists a path between every pair of vertices in that component.
-WCC is a very simple and fast algorithm, which will only work correctly on undirected graphs.
-Your results on directed graphs may vary, depending on how connected your components are.
-
-In the case of SCC a component means every vertex is reachable from any other vertex in the same component.
-The algorithm is more complex than the WCC algorithm and requires more RAM, because each vertex needs to store much more state.
-Consider using WCC if you think your data may be suitable for it.
-
-```javascript
-var pregel = require("@arangodb/pregel");
-// weakly connected components
-pregel.start("connectedcomponents", "graphname")
-// strongly connected components
-pregel.start("scc", "graphname")
-```
-
-### Hyperlink-Induced Topic Search (HITS)
-
-HITS is a link analysis algorithm that rates Web pages, developed by Jon Kleinberg (the algorithm is also known as Hubs and Authorities).
-
-The idea behind Hubs and Authorities comes from the typical structure of the web: Certain websites, known as hubs, serve as large directories that are not actually
-authoritative on the information that they hold. These hubs are used as compilations of a broad catalog of information that lead users directly to other authoritative webpages.
-The algorithm assigns each vertex two scores: the authority score and the hub score. The authority score rates how many good hubs point to a particular
-vertex (or webpage); the hub score rates how good (authoritative) the vertices pointed to are. For more information see https://en.wikipedia.org/wiki/HITS_algorithm
-
-Our version of the algorithm
-converges after a certain amount of time. The parameter *threshold* can be used to set a limit for the convergence (measured as the maximum absolute difference of the hub and
-authority scores between the current and last iteration).
-When you specify the result field name, the hub score will be stored in `<resultField>_hub` and the authority score in
-`<resultField>_auth`.
-The algorithm can be executed like this:
-
-```javascript
-var pregel = require("@arangodb/pregel");
-var handle = pregel.start("hits", "yourgraph", {threshold:0.00001, resultField: "score"});
-```
-
-### Vertex Centrality
-
-Centrality measures help identify the most important vertices in a graph. They can be used in a wide range of applications:
-For example, they can be used to identify *influencers* in social networks, or *middle-men* in terrorist networks.
-There are various definitions for centrality, the simplest one being the vertex degree.
-These definitions were not designed with scalability in mind. It is probably impossible to discover an efficient algorithm which computes them in a distributed way.
-Fortunately there are scalable substitutions available, which should be equally usable for most use cases.
-
-![Illustration of an execution of different centrality measures (Freeman 1977)](centrality_visual.png)
-
-#### Effective Closeness
-
-A common definition of centrality is the **closeness centrality** (or closeness).
-The closeness of a vertex in a graph is the inverse of the average length of the shortest paths between the vertex
-and all other vertices. For vertices *x*, *y* and shortest distance *d(y,x)* it is defined as
-
-![Vertex Closeness](closeness.png)
-
-Effective Closeness approximates the closeness measure. The algorithm works by iteratively estimating the number
-of shortest paths passing through each vertex. The score approximates the real closeness score, since
-it is not possible to actually count all shortest paths due to the horrendous O(n^2 * d) memory requirements.
-The algorithm is from the paper *Centralities in Large Networks: Algorithms and Observations (U. Kang et al. 2011)*.
-
-ArangoDB's implementation approximates the number of shortest paths in each iteration by using a HyperLogLog counter with 64 buckets.
-This should work well on large graphs and on smaller ones as well. The memory requirements should be **O(n * d)** where
-*n* is the number of vertices and *d* the diameter of your graph. Each vertex will store a counter for each iteration of the
-algorithm. The algorithm can be used like this:
-
-```javascript
-const pregel = require("@arangodb/pregel");
-const handle = pregel.start("effectivecloseness", "yourgraph", {resultField: "closeness"});
-```
-
-#### LineRank
-
-Another common measure is the [betweenness centrality](https://en.wikipedia.org/wiki/Betweenness_centrality):
-It measures the number of times a vertex is part of shortest paths between any pairs of vertices.
-For a vertex *v* betweenness is defined as
-
-![Vertex Betweenness](betweeness.png)
-
-Here σ represents the number of shortest paths between *x* and *y*, and σ(v) represents the
-number of those paths that also pass through vertex *v*. Intuitively, a vertex with higher betweenness centrality will have more information
-passing through it.
-
-**LineRank** approximates the random walk betweenness of every vertex in a graph. This is the probability that someone starting on
-an arbitrary vertex will visit this node when randomly choosing edges to visit.
-The algorithm essentially builds a line graph out of your graph (it switches the vertices and edges), and then computes a score similar to PageRank.
-This can be considered a scalable equivalent to vertex betweenness, which can be executed in a distributed way in ArangoDB.
-The algorithm is from the paper *Centralities in Large Networks: Algorithms and Observations (U. Kang et al. 2011)*.
-
-```javascript
-const pregel = require("@arangodb/pregel");
-const handle = pregel.start("linerank", "yourgraph", {"resultField": "rank"});
-```
-
-### Community Detection
-
-Graphs based on real-world networks often have a community structure. This means it is possible to find groups of vertices such that each vertex group is internally more densely connected than outside the group.
-This has many applications when you want to analyze your networks. For example,
-social networks include community groups (the origin of the term, in fact) based on common location, interests, occupation, etc.
-
-#### Label Propagation
-
-*Label Propagation* can be used to implement community detection on large graphs. The idea is that each
-vertex should be in the community that most of its neighbors are in. We iteratively determine this by first
-assigning random community IDs. Then, in each iteration, a vertex sends its current community ID to all its neighbor vertices.
-Each vertex then adopts the community ID it received most frequently during the iteration.
-
-The algorithm runs until it converges,
-which likely never really happens on large graphs. Therefore you need to specify a maximum iteration bound which suits you.
-The default bound is 500 iterations, which is likely too large for your application.
-It should work best on undirected graphs; results on directed graphs might vary depending on the density of your graph.
- -```javascript -const pregel = require("@arangodb/pregel"); -const handle = pregel.start("labelpropagation", "yourgraph", {maxGSS:100, resultField: "community"}); -``` - -#### Speaker-Listener Label Propagation - -The [Speaker-listener Label Propagation](https://arxiv.org/pdf/1109.5720.pdf) (SLPA) can be used to implement community detection. -It works similar to the label propagation algorithm, -but now every node additionally accumulates a memory of observed labels (instead of forgetting all but one label). - -Before the algorithm run, every vertex is initialized with an unique ID (the initial community label). -During the run three steps are executed for each vertex: - -1. Current vertex is the listener all other vertices are speakers -2. Each speaker sends out a label from memory, we send out a random label with a probability - proportional to the number of times the vertex observed the label -3. The listener remembers one of the labels, we always choose the most frequently observed label - -```javascript -const pregel = require("@arangodb/pregel"); -const handle = pregel.start("slpa", "yourgraph", {maxGSS:100, resultField: "community"}); -``` - -You can also execute SLPA with the `maxCommunities` parameter to limit the number of output communities. -Internally the algorithm will still keep the memory of all labels, but the output is reduced to just he `n` most frequently -observed labels. - -```javascript -const pregel = require("@arangodb/pregel"); -const handle = pregel.start("slpa", "yourgraph", {maxGSS:100, resultField:"community", maxCommunities:1}); -// check the status periodically for completion -pregel.status(handle); -``` diff --git a/Documentation/Books/Manual/Graphs/Pregel/betweeness.png b/Documentation/Books/Manual/Graphs/Pregel/betweeness.png deleted file mode 100644 index e923e6a6ca93..000000000000 Binary files a/Documentation/Books/Manual/Graphs/Pregel/betweeness.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/Pregel/centrality_visual.png b/Documentation/Books/Manual/Graphs/Pregel/centrality_visual.png deleted file mode 100644 index 093caf8ea383..000000000000 Binary files a/Documentation/Books/Manual/Graphs/Pregel/centrality_visual.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/Pregel/closeness.png b/Documentation/Books/Manual/Graphs/Pregel/closeness.png deleted file mode 100644 index 239e14e71a6a..000000000000 Binary files a/Documentation/Books/Manual/Graphs/Pregel/closeness.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/README.md b/Documentation/Books/Manual/Graphs/README.md deleted file mode 100644 index e7d838926838..000000000000 --- a/Documentation/Books/Manual/Graphs/README.md +++ /dev/null @@ -1,327 +0,0 @@ -ArangoDB Graphs -=============== - -First Steps with Graphs ------------------------ - -A Graph consists of *vertices* and *edges*. Edges are stored as documents in *edge collections*. -A vertex can be a document of a *document collection* or of an *edge collection* (so *edges* can be used as *vertices*). -Which collections are used within a named graph is defined via *edge definitions*. -A named graph can contain more than one *edge definition*, at least one is needed. -Graphs allow you to structure your models in line with your domain and group them logically in collections and giving you the power to query them in the same graph queries. - -{% hint 'info' %} -New to graphs? 
[**Take our free graph course for freshers**](https://www.arangodb.com/arangodb-graph-course/) -and get from zero knowledge to advanced query techniques. -{% endhint %} - -Coming from a relational background - what's a graph? ------------------------------------------------------ - -In SQL you commonly have the construct of a relation table to store *n:m* relations between two data tables. -An *edge collection* is somewhat similar to these *relation tables*; *vertex collections* resemble the data tables with the objects to connect. -While simple graph queries with fixed number of hops via the relation table may be doable in SQL with several nested joins, -graph databases can handle an arbitrary number of these hops over edge collections - this is called *traversal*. -Also edges in one edge collection may point to several vertex collections. -Its common to have attributes attached to edges, i.e. a *label* naming this interconnection. -Edges have a direction, with their relations `_from` and `_to` pointing *from* one document *to* another document stored in vertex collections. -In queries you can define in which directions the edge relations may be followed (`OUTBOUND`: `_from` → `_to`, `INBOUND`: `_from` ← `_to`, `ANY`: `_from` ↔ `_to`). - -Named Graphs ------------- - -Named graphs are completely managed by ArangoDB, and thus also [visible in the web interface](../Programs/WebInterface/Graphs.md). -They use the full spectrum of ArangoDB's graph features. You may access them via several interfaces. - -- [AQL Graph Operations](../../AQL/Graphs/index.html) with several flavors: - - [AQL Traversals](../../AQL/Graphs/Traversals.html) on both named and anonymous graphs - - [AQL Shortest Path](../../AQL/Graphs/ShortestPath.html) on both named and anonymous graph -- [JavaScript General Graph implementation, as you may use it in Foxx Services](GeneralGraphs/README.md) - - [Graph Management](GeneralGraphs/Management.md); creating & manipulating graph definitions; inserting, updating and deleting vertices and edges into graphs - - [Graph Functions](GeneralGraphs/Functions.md) for working with edges and vertices, to analyze them and their relations -- [JavaScript Smart Graph implementation, for scalable graphs](SmartGraphs/README.md) - - [Smart Graph Management](SmartGraphs/Management.md); creating & manipulating SmartGraph definitions; Differences to General Graph -- [RESTful General Graph interface](../../HTTP/Gharial/index.html) used to implement graph management in client drivers - -### Manipulating collections of named graphs with regular document functions - -The underlying collections of the named graphs are still accessible using the standard methods for collections. -However the graph module adds an additional layer on top of these collections giving you the following guarantees: - -* All modifications are executed transactional -* If you delete a vertex all edges will be deleted, you will never have loose ends -* If you insert an edge it is checked if the edge matches the *edge definitions*, your edge collections will only contain valid edges - -These guarantees are lost if you access the collections in any other way than the graph module or AQL, -so if you delete documents from your vertex collections directly, the edges pointing to them will be remain in place. - -Anonymous graphs ----------------- - -Sometimes you may not need all the powers of named graphs, but some of its bits may be valuable to you. 
-You may use anonymous graphs in the [traversals](Traversals/README.md) -and in the [Working with Edges](Edges/README.md) chapter. -Anonymous graphs don't have *edge definitions* describing which *vertex collection* is connected by which *edge collection*. The graph model has to be maintained in the client side code. -This gives you more freedom than the strict *named graphs*. - -- [AQL Graph Operations](../../AQL/Graphs/index.html) are available for both, named and anonymous graphs: - - [AQL Traversals](../../AQL/Graphs/Traversals.html) - - [AQL Shortest Path](../../AQL/Graphs/ShortestPath.html) - -### When to choose anonymous or named graphs? - -As noted above, named graphs ensure graph integrity, both when inserting or removing edges or vertices. -So you won't encounter dangling edges, even if you use the same vertex collection in several named graphs. -This involves more operations inside the database which come at a cost. -Therefore anonymous graphs may be faster in many operations. -So this question may be narrowed down to: 'Can I afford the additional effort or do I need the warranty for integrity?'. - -Multiple edge collections vs. `FILTER`s on edge document attributes -------------------------------------------------------------------- - -If you want to only traverse edges of a specific type, there are two ways to achieve this. The first would be -an attribute in the edge document - i.e. `type`, where you specify a differentiator for the edge - -i.e. `"friends"`, `"family"`, `"married"` or `"workmates"`, so you can later `FILTER e.type = "friends"` -if you only want to follow the friend edges. - -Another way, which may be more efficient in some cases, is to use different edge collections for different -types of edges, so you have `friend_edges`, `family_edges`, `married_edges` and `workmate_edges` as collection names. -You can then configure several named graphs including a subset of the available edge and vertex collections - -or you use anonymous graph queries, where you specify a list of edge collections to take into account in that query. -To only follow friend edges, you would specify `friend_edges` as sole edge collection. - -Both approaches have advantages and disadvantages. `FILTER` operations on edge attributes will do comparisons on -each traversed edge, which may become CPU-intense. When not *finding* the edges in the first place because of the -collection containing them is not traversed at all, there will never be a reason to actually check for their -`type` attribute with `FILTER`. - -The multiple edge collections approach is limited by the [number of collections that can be used simultaneously in one query](../../AQL/Fundamentals/Syntax.html#collection-names). -Every collection used in a query requires some resources inside of ArangoDB and the number is therefore limited -to cap the resource requirements. You may also have constraints on other edge attributes, such as a hash index -with a unique constraint, which requires the documents to be in a single collection for the uniqueness guarantee, -and it may thus not be possible to store the different types of edges in multiple edge collections. - -So, if your edges have about a dozen different types, it's okay to choose the collection approach, otherwise -the `FILTER` approach is preferred. You can still use `FILTER` operations on edges of course. You can get rid -of a `FILTER` on the `type` with the former approach, everything else can stay the same. - -Which part of my data is an Edge and which a Vertex? 
-
-Which part of my data is an Edge and which a Vertex?
------------------------------------------------------
-
-The main objects in your data model, such as users, groups or articles, are usually considered to be vertices.
-For each type of object, a document collection (also called vertex collection) should store the individual entities.
-Entities can be connected by edges to express and classify relations between vertices. It often makes sense to have
-an edge collection per relation type.
-
-ArangoDB does not require you to store your data in graph structures with edges and vertices; you can also decide
-to embed attributes such as which groups a user is part of, or `_id`s of documents in another document instead of
-connecting the documents with edges. It can be a meaningful performance optimization for *1:n* relationships, if
-your data is not focused on relations and you don't need graph traversal with varying depth. Embedding data usually
-introduces some redundancy and possibly inconsistencies, but it can be an acceptable tradeoff.
-
-### Vertices
-
-Let's say we have two vertex collections, `Users` and `Groups`. Documents in the `Groups` collection contain the attributes
-of the group, e.g. when it was founded, its subject, an icon URL and so on. `Users` documents contain the data specific to a
-user - like names, birthdays, avatar URLs, hobbies...
-
-### Edges
-
-We can use an edge collection to store relations between users and groups. Since multiple users may be in an arbitrary
-number of groups, this is an **m:n** relation. The edge collection can be called `UsersInGroups`, with e.g. one edge
-with `_from` pointing to `Users/John` and `_to` pointing to `Groups/BowlingGroupHappyPin`. This makes the user **John**
-a member of the group **Bowling Group Happy Pin**. Attributes of this relation may contain qualifiers, such as the
-permissions of **John** in this group, the date when he joined the group etc. A small arangosh sketch of this setup
-is shown further below.
-
-![User in group example](graph_user_in_group.png)
-
-So roughly put, if you use documents and their attributes in a sentence, nouns would typically be vertices and verbs become the edges.
-You can see this in the [knows graph](#the-knowsgraph) below:
-
-    Alice knows Bob, who in turn knows Charlie.
-
-### Advantages of this approach
-
-Graphs give you the advantage of not just being able to have a fixed number of **m:n** relations in a row, but an
-arbitrary number of them. Edges can be traversed in both directions, so it's easy to determine all
-groups a user is in, but also to find out which members a certain group has. Users could also be
-interconnected to create a social network.
-
-Using the graph data model, dealing with data that has lots of relations stays manageable and can be queried in very
-flexible ways, whereas it would cause headaches to handle it in a relational database system.
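-
-A minimal arangosh sketch of the users/groups example above (the document keys and attribute values are made up for
-illustration):
-
-```js
-db._create("Users");
-db._create("Groups");
-db._createEdgeCollection("UsersInGroups");
-
-db.Users.save({ _key: "John", name: "John" });
-db.Groups.save({ _key: "BowlingGroupHappyPin", name: "Bowling Group Happy Pin" });
-
-// the edge carries qualifiers of the relation
-db.UsersInGroups.save({
-  _from: "Users/John",
-  _to: "Groups/BowlingGroupHappyPin",
-  permissions: "read-write",
-  joined: "2019-02-14"
-});
-```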
-
-Backup and restore
-------------------
-
-You certainly want to have backups of your graph data. You can use [Arangodump](../Programs/Arangodump/README.md) to create the backup,
-and [Arangorestore](../Programs/Arangorestore/README.md) to restore it into a new ArangoDB. However, you should note that:
-
-- you need the system collection `_graphs` if you back up named graphs.
-- you need to back up the complete set of all edge and vertex collections your graph consists of. Partial dump/restore may not work.
-
-Managing graphs
----------------
-
-By default you should use [the interface your driver provides to manage graphs](../../HTTP/Gharial/Management.html).
-
-This is documented, for example, [in the Graphs section of the ArangoDB Java driver](https://github.com/arangodb/arangodb-java-driver#graphs).
-
-Example Graphs
---------------
-
-ArangoDB comes with a set of easily graspable graphs that are used to demonstrate the APIs.
-You can use the `add samples` tab in the `create graph` window in the web interface, or load the module `@arangodb/graph-examples/example-graph` in arangosh and use it to create instances of these graphs in your ArangoDB.
-Once you've created them, you can [inspect them in the web interface](../Programs/WebInterface/Graphs.md) - which was used to create the pictures below.
-
-You [can easily look into the innards of this script](https://github.com/arangodb/arangodb/blob/devel/js/common/modules/%40arangodb/graph-examples/example-graph.js) for reference on how to manage graphs programmatically.
-
-### The Knows\_Graph
-
-A set of persons knowing each other:
-![Persons relation Example Graph](knows_graph.png)
-
-The *knows* graph consists of one *vertex collection* `persons` connected via one *edge collection* `knows`.
-It will contain five persons: *Alice*, *Bob*, *Charlie*, *Dave* and *Eve*.
-We will have the following directed relations:
-
- - *Alice* knows *Bob*
- - *Bob* knows *Charlie*
- - *Bob* knows *Dave*
- - *Eve* knows *Alice*
- - *Eve* knows *Bob*
-
-This is how we create it, inspect its *vertices* and *edges*, and drop it again:
-
-    @startDocuBlockInline graph_create_knows_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_knows_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var g = examples.loadGraph("knows_graph");
-    db.persons.toArray()
-    db.knows.toArray();
-    examples.dropGraph("knows_graph");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_knows_sample
-
-**Note:** with the graph viewer's default "Search Depth" of 2 you may not see all edges of this graph.
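-
-Once the sample graph is loaded, you can immediately traverse it with AQL from arangosh. A small sketch, assuming
-the lowercase vertex keys used by the sample data (e.g. `persons/alice`):
-
-```js
-var examples = require("@arangodb/graph-examples/example-graph.js");
-examples.loadGraph("knows_graph");
-
-// whom does Alice know, directly or via one intermediate person?
-db._query(`
-  FOR v IN 1..2 OUTBOUND 'persons/alice' GRAPH 'knows_graph'
-    RETURN DISTINCT v.name
-`).toArray();
-
-examples.dropGraph("knows_graph");
-```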
-
-### The Social Graph
-
-A set of persons and their relations:
-
-![Social Example Graph](social_graph.png)
-
-This example has female and male persons as *vertices* in two *vertex collections* - `female` and `male`. The *edges* are their connections in the `relation` *edge collection*.
-This is how we create it, inspect its *vertices* and *edges*, and drop it again:
-
-    @startDocuBlockInline graph_create_social_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_social_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var graph = examples.loadGraph("social");
-    db.female.toArray()
-    db.male.toArray()
-    db.relation.toArray()
-    examples.dropGraph("social");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_social_sample
-
-### The City Graph
-
-A set of European cities, and their fictional traveling distances as connections:
-
-![Cities Example Graph](cities_graph.png)
-
-The example has the cities as *vertices* in several *vertex collections* - `germanCity` and `frenchCity`. The *edges* are their interconnections in several *edge collections* `french / german / international Highway`.
-This is how we create it, inspect its *edges* and *vertices*, and drop it again:
-
-    @startDocuBlockInline graph_create_cities_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_cities_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var g = examples.loadGraph("routeplanner");
-    db.frenchCity.toArray();
-    db.germanCity.toArray();
-    db.germanHighway.toArray();
-    db.frenchHighway.toArray();
-    db.internationalHighway.toArray();
-    examples.dropGraph("routeplanner");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_cities_sample
-
-### The Traversal Graph
-
-This graph was designed to demonstrate filters in traversals. It has some labels to filter on.
-
-![traversal graph](traversal_graph.png)
-
-The example has all its vertices in the *circles* collection, and an *edges* edge collection to connect them.
-Circles have unique numeric labels. Edges have two boolean attributes (*theFalse* always being false, *theTruth* always being true) and a label sorting *B* - *D* to the left side, *G* - *K* to the right side. The left and right sides split into paths at *B* and *G*, which are each direct neighbors of the root node *A*. Starting from *A* the graph has a depth of 3 on all its paths.
-
-    @startDocuBlockInline graph_create_traversal_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_traversal_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var g = examples.loadGraph("traversalGraph");
-    db.circles.toArray();
-    db.edges.toArray();
-    examples.dropGraph("traversalGraph");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_traversal_sample
-
-**Note:** with the graph viewer's default "Search Depth" of 2 you may not see all nodes of this graph.
-
-### The World Graph
-
-![world graph](world_graph.png)
-
-The world country graph structures its nodes like this: world → continent → country → capital. In some cases the edge directions are not forward (therefore the graph will be displayed as disjoint parts in the graph viewer). There are two ways of creating it: one using the named graph utilities (*worldCountry*), one without (*worldCountryUnManaged*).
-It is used to demonstrate raw traversal operations.
-
-    @startDocuBlockInline graph_create_world_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_world_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var g = examples.loadGraph("worldCountry");
-    db.worldVertices.toArray();
-    db.worldEdges.toArray();
-    examples.dropGraph("worldCountry");
-    var g = examples.loadGraph("worldCountryUnManaged");
-    examples.dropGraph("worldCountryUnManaged");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_world_sample
-
-### The Mps Graph
-
-This graph was created to demonstrate a use case of the shortest path algorithm. Even though the algorithm can only determine one shortest path, it is possible to return multiple shortest paths with two separate queries. Therefore the graph is named after the [**m**ultiple **p**ath **s**earch](../../AQL/Examples/MultiplePaths.html) use case.
-
-![mps graph](mps_graph.png)
-
-The example graph consists of *vertices* in the `mps_verts` collection and *edges* in the `mps_edges` collection. It is a simple traversal graph with start node *A* and end node *C*.
-
-This is how we create it, inspect its *vertices* and *edges*, and drop it again:
-
-    @startDocuBlockInline graph_create_mps_sample
-    @EXAMPLE_ARANGOSH_OUTPUT{graph_create_mps_sample}
-    var examples = require("@arangodb/graph-examples/example-graph.js");
-    var g = examples.loadGraph("mps_graph");
-    db.mps_verts.toArray();
-    db.mps_edges.toArray();
-    examples.dropGraph("mps_graph");
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock graph_create_mps_sample
-
-### Higher volume graph examples
-
-All of the above examples are rather small, so they are easy to comprehend and demonstrate how the functionality works.
-There are, however, several datasets freely available on the web that are a lot bigger.
-[We collected some of them with import scripts](https://github.com/arangodb/example-datasets) so you may play around with them.
-Another huge graph is the [Pokec social network](https://snap.stanford.edu/data/soc-pokec.html)
-from Slovakia that we [used for performance testing on several databases](https://www.arangodb.com/2015/06/multi-model-benchmark/).
-You will find import scripts etc. in this blog post.
-
-Cookbook examples
------------------
-
-The chapters referenced above describe the various APIs of ArangoDB's graph engine with small examples. Our cookbook has some more real-life examples:
-
- - [Traversing a graph in full depth](../../Cookbook/Graph/FulldepthTraversal.html)
 - [Retrieving documents from ArangoDB without knowing the structure](../../Cookbook/UseCases/JavaDriverBaseDocument.html)
 - [Using a custom visitor from node.js](../../Cookbook/Graph/CustomVisitorFromNodeJs.html)
 - [AQL Example Queries on an Actors and Movies Database](../../Cookbook/Graph/ExampleActorsAndMovies.html)
-
diff --git a/Documentation/Books/Manual/Graphs/SmartGraphs/Management.md b/Documentation/Books/Manual/Graphs/SmartGraphs/Management.md
deleted file mode 100644
index 8a1c6bfe29b3..000000000000
--- a/Documentation/Books/Manual/Graphs/SmartGraphs/Management.md
+++ /dev/null
@@ -1,141 +0,0 @@
-Smart Graph Management
-======================
-
-This chapter describes the JavaScript interface for [creating and modifying SmartGraphs](../README.md).
-First of all, note that every SmartGraph is a specialized version of a General Graph, which means all of the General Graph functionality is available on a SmartGraph as well.
-The major difference between the two modules is the handling of the underlying collections: the General Graph does not enforce or maintain any sharding of the collections and can therefore combine arbitrary sets of existing collections.
-SmartGraphs enforce and rely on a special sharding of the underlying collections and hence can only work with collections that are created through the SmartGraph itself.
-This also means that SmartGraphs cannot overlap; a collection can only be sharded for one SmartGraph.
-If you need to make sure that all queries can be executed with SmartGraph performance, just create one large SmartGraph covering everything and query it stating the subset of edge collections explicitly.
-To generally understand the concept of this module, please read the chapter about [General Graph Management](../GeneralGraphs/Management.md) first.
-In the following we will only describe the overloaded functionality.
-Everything else works identically in both modules.
-
-Create a graph
---------------
-
-SmartGraphs also require edge relations to be created; the format of the relations is identical.
-The only difference is that all collections used within the relations to create a new SmartGraph cannot exist yet. They have to be created by the Graph in order to enforce the correct sharding. - - - -Create a graph - -`graph_module._create(graphName, edgeDefinitions, orphanCollections, smartOptions)` - -The creation of a graph requires the name and some SmartGraph options. -Due to the API `edgeDefinitions` and `orphanCollections` have to be given, but -both can be empty arrays and can be created later. -The `edgeDefinitions` can be created using the convenience method `_relation` known from the `general-graph` module, which is also available here. -`orphanCollections` again is just a list of additional vertex collections which are not yet connected via edges but should follow the same sharding to be connected later on. -All collections used within the creation process are newly created. -The process will fail if one of them already exists. -All newly created collections will immediately be dropped again in the failed case. - -**Parameters** - -* graphName (required) Unique identifier of the graph -* edgeDefinitions (required) List of relation definition objects, may be empty -* orphanCollections (required) List of additional vertex collection names, may be empty -* smartOptions (required) A JSON object having the following keys: - * numberOfShards (required) - The number of shards that will be created for each collection. To maintain the correct sharding all collections need an identical number of shards. This cannot be modified after creation of the graph. - * smartGraphAttribute (required) - The attribute that will be used for sharding. All vertices are required to have this attribute set and it has to be a string. Edges derive the attribute from their connected vertices. - - -**Examples** - - -Create an empty graph, edge definitions can be added at runtime: - - - arangosh> var graph_module = require("@arangodb/smart-graph"); - arangosh> var graph = graph_module._create("myGraph", [], [], {smartGraphAttribute: "region", numberOfShards: 9}); - [ SmartGraph myGraph EdgeDefinitions: [ ] VertexCollections: [ ] ] - - -Create a graph using an edge collection `edges` and a single vertex collection `vertices` - - - arangosh> var graph_module = require("@arangodb/smart-graph"); - arangosh> var edgeDefinitions = [ graph_module._relation("edges", "vertices", "vertices") ]; - arangosh> var graph = graph_module._create("myGraph", edgeDefinitions, [], {smartGraphAttribute: "region", numberOfShards: 9}); - [ SmartGraph myGraph EdgeDefinitions: [ "edges: [vertices] -> [vertices]" ] VertexCollections: [ ] ] - - -Create a graph with edge definitions and orphan collections: - - - arangosh> var graph_module = require("@arangodb/smart-graph"); - arangosh> var edgeDefinitions = [ graph_module._relation("myRelation", ["male", "female"], ["male", "female"]) ]; - arangosh> var graph = graph_module._create("myGraph", edgeDefinitions, ["sessions"], {smartGraphAttribute: "region", numberOfShards: 9}); - [ Graph myGraph EdgeDefinitions: [ - "myRelation: [female, male] -> [female, male]" - ] VertexCollections: [ - "sessions" - ] ] - - -Modify a graph definition at runtime ------------------------------------- - -After you have created a SmartGraph its definition is also not immutable. -You can still add or remove relations. -This is again identical to General Graphs. -However there is one important difference: -You can only add collections that either *do not exist*, or that have been created by this graph earlier. 
-The latter can be achieved if you, for example, remove an orphan collection from this graph without dropping the collection itself.
-Then, if after some time you decide to add it again, it can be used.
-This is because the enforced sharding is still applied to this vertex collection, hence it is suitable to be added again.
-
-
-### Remove a vertex collection
-
-
-
-Remove a vertex collection from the graph
-
-`graph._removeVertexCollection(vertexCollectionName, dropCollection)`
-
-In most cases this function works identically to the General Graph one.
-But there is one special case:
-The first vertex collection added to the graph (either orphan or within a relation) defines the sharding for all collections within the graph.
-This collection can never be removed from the graph.
-
-
-**Parameters**
-
-* vertexCollectionName (required) Name of vertex collection.
-* dropCollection (optional) If true the collection will be dropped if it is
-  not used in any other graph. Default: false.
-
-**Examples**
-
-The following example shows that you cannot drop the initial collection.
-You have to drop the complete graph.
-If you just want to get rid of the data, `truncate` it.
-
-
-    arangosh> var graph_module = require("@arangodb/smart-graph")
-    arangosh> var relation = graph_module._relation("edges", "vertices", "vertices");
-    arangosh> var graph = graph_module._create("myGraph", [relation], ["other"], {smartGraphAttribute: "region", numberOfShards: 9});
-    arangosh> graph._orphanCollections();
-    [
-      "other"
-    ]
-    arangosh> graph._deleteEdgeDefinition("edges");
-    arangosh> graph._orphanCollections();
-    [
-      "vertices",
-      "other"
-    ]
-    arangosh> graph._removeVertexCollection("other");
-    arangosh> graph._orphanCollections();
-    [
-      "vertices"
-    ]
-    arangosh> graph._removeVertexCollection("vertices");
-    ArangoError 4002: cannot drop this smart collection
-
-
diff --git a/Documentation/Books/Manual/Graphs/SmartGraphs/README.md b/Documentation/Books/Manual/Graphs/SmartGraphs/README.md
deleted file mode 100644
index 1bfbaf53a5d0..000000000000
--- a/Documentation/Books/Manual/Graphs/SmartGraphs/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-SmartGraphs
-===========
-
-{% hint 'info' %}
-This feature is only available in the
-[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/)
-{% endhint %}
-
-This chapter describes the `smart-graph` module, which enables you to manage
-graphs at scale. It will give a vast performance benefit for all graphs sharded
-in an ArangoDB Cluster. On a single server this feature is pointless, hence it
-is only available in cluster mode.
-
-In terms of querying there is no difference between SmartGraphs and
-General Graphs. The former is a transparent replacement for the latter.
-For graph querying please refer to the [AQL Graph Operations](../../../AQL/Graphs/index.html)
-and [General Graph Functions](../GeneralGraphs/Functions.md) sections.
-The optimizer is clever enough to identify whether it is a SmartGraph or not.
-
-The difference is only in the management section: creating and modifying the
-underlying collections of the graph. For a detailed API reference please refer
-to [SmartGraph Management](Management.md).
-
-Do the hands-on
-[ArangoDB SmartGraphs Tutorial](https://www.arangodb.com/using-smartgraphs-arangodb/)
-to learn more.
-
-What makes a graph smart?
--------------------------
-
-Most graphs have one feature that divides the entire graph into several smaller subgraphs.
These subgraphs have a large number of edges that only connect
-vertices in the same subgraph and only a few edges connecting vertices from
-other subgraphs.
-
-Examples of these graphs are:
-
-- **Social Networks**
-  Typically the feature here is the region/country users live in.
-  Every user typically has more contacts in the same region/country than she
-  has in other regions/countries.
-
-- **Transport Systems**
-  For those the feature is also the region/country. There are many local
-  transport connections, but only a few across countries.
-
-- **E-Commerce**
-  In this case the category of products is probably a good feature.
-  Often products of the same category are bought together.
-
-If this feature is known, SmartGraphs can make use of it.
-
-When creating a SmartGraph you have to define a smart attribute (via the
-`smartGraphAttribute` option), which is the name of an attribute stored in
-every vertex. The graph will then be automatically sharded in such a way that
-all vertices with the same value are stored on the same physical machine, and
-all edges connecting vertices with identical smart attribute values are stored
-on this machine as well.
-At query time the query optimizer and the query executor both know for
-every document exactly where it is stored and can thereby minimize network
-overhead. Everything that can be computed locally will be computed locally.
-
-Benefits of SmartGraphs
------------------------
-
-Because of the guaranteed sharding described above, the performance of queries
-that only cover one subgraph is almost equal to that of a purely local
-computation. Queries that cover more than one subgraph require some network
-overhead. The more subgraphs are touched, the more network cost will apply.
-However, the overall performance is never worse than the same query using a
-General Graph.
-
-Getting started
----------------
-
-First of all, SmartGraphs *cannot use existing collections*; when switching to
-SmartGraphs from an existing data set you have to import the data into a fresh
-SmartGraph. This switch can easily be achieved with
-[arangodump](../../Programs/Arangodump/README.md) and
-[arangorestore](../../Programs/Arangorestore/README.md).
-The only thing you have to change in this pipeline is to create the new
-collections with the SmartGraph before starting `arangorestore`.
-
-- Create a graph
-
-  In comparison to a General Graph we have to add more options when creating the
-  graph. The two options `smartGraphAttribute` and `numberOfShards` are
-  required and cannot be modified later.
-
-  @startDocuBlockInline smartGraphCreateGraphHowTo1
-  arangosh> var graph_module = require("@arangodb/smart-graph");
-  arangosh> var graph = graph_module._create("myGraph", [], [], {smartGraphAttribute: "region", numberOfShards: 9});
-  arangosh> graph;
-  [ SmartGraph myGraph EdgeDefinitions: [ ] VertexCollections: [ ] ]
-  @endDocuBlock smartGraphCreateGraphHowTo1
-
-- Add some vertex collections
-
-  This is again identical to General Graphs. The module will set up the correct
-  sharding for all these collections. *Note*: The collections have to be new.
- - @startDocuBlockInline smartGraphCreateGraphHowTo2 - arangosh> graph._addVertexCollection("shop"); - arangosh> graph._addVertexCollection("customer"); - arangosh> graph._addVertexCollection("pet"); - arangosh> graph; - [ SmartGraph myGraph EdgeDefinitions: [ ] VertexCollections: [ "shop", "customer", "pet" ] ] - @endDocuBlock smartGraphCreateGraphHowTo2 - -- Define relations on the Graph - - @startDocuBlockInline smartGraphCreateGraphHowTo3 - arangosh> var rel = graph_module._relation("isCustomer", ["shop"], ["customer"]); - arangosh> graph._extendEdgeDefinitions(rel); - arangosh> graph; - [ SmartGraph myGraph EdgeDefinitions: [ "isCustomer: [shop] -> [customer]" ] VertexCollections: [ "pet" ] ] - @endDocuBlock smartGraphCreateGraphHowTo3 diff --git a/Documentation/Books/Manual/Graphs/Traversals/ExampleData.md b/Documentation/Books/Manual/Graphs/Traversals/ExampleData.md deleted file mode 100644 index 616f53138d82..000000000000 --- a/Documentation/Books/Manual/Graphs/Traversals/ExampleData.md +++ /dev/null @@ -1,200 +0,0 @@ -Example Data -============ - -The following examples all use a vertex collection *v* and an [edge collection](../../Appendix/Glossary.md#edge-collection) *e*. The vertex -collection *v* contains continents, countries, and capitals. The edge collection *e* -contains connections between continents and countries, and between countries and capitals. - -To set up the collections and populate them with initial data, the following script was used: - -```js -db._create("v"); -db._createEdgeCollection("e"); - -// vertices: root node -db.v.save({ _key: "world", name: "World", type: "root" }); - -// vertices: continents -db.v.save({ _key: "continent-africa", name: "Africa", type: "continent" }); -db.v.save({ _key: "continent-asia", name: "Asia", type: "continent" }); -db.v.save({ _key: "continent-australia", name: "Australia", type: "continent" }); -db.v.save({ _key: "continent-europe", name: "Europe", type: "continent" }); -db.v.save({ _key: "continent-north-america", name: "North America", type: "continent" }); -db.v.save({ _key: "continent-south-america", name: "South America", type: "continent" }); - -// vertices: countries -db.v.save({ _key: "country-afghanistan", name: "Afghanistan", type: "country", code: "AFG" }); -db.v.save({ _key: "country-albania", name: "Albania", type: "country", code: "ALB" }); -db.v.save({ _key: "country-algeria", name: "Algeria", type: "country", code: "DZA" }); -db.v.save({ _key: "country-andorra", name: "Andorra", type: "country", code: "AND" }); -db.v.save({ _key: "country-angola", name: "Angola", type: "country", code: "AGO" }); -db.v.save({ _key: "country-antigua-and-barbuda", name: "Antigua and Barbuda", type: "country", code: "ATG" }); -db.v.save({ _key: "country-argentina", name: "Argentina", type: "country", code: "ARG" }); -db.v.save({ _key: "country-australia", name: "Australia", type: "country", code: "AUS" }); -db.v.save({ _key: "country-austria", name: "Austria", type: "country", code: "AUT" }); -db.v.save({ _key: "country-bahamas", name: "Bahamas", type: "country", code: "BHS" }); -db.v.save({ _key: "country-bahrain", name: "Bahrain", type: "country", code: "BHR" }); -db.v.save({ _key: "country-bangladesh", name: "Bangladesh", type: "country", code: "BGD" }); -db.v.save({ _key: "country-barbados", name: "Barbados", type: "country", code: "BRB" }); -db.v.save({ _key: "country-belgium", name: "Belgium", type: "country", code: "BEL" }); -db.v.save({ _key: "country-bhutan", name: "Bhutan", type: "country", code: "BTN" }); 
-db.v.save({ _key: "country-bolivia", name: "Bolivia", type: "country", code: "BOL" }); -db.v.save({ _key: "country-bosnia-and-herzegovina", name: "Bosnia and Herzegovina", type: "country", code: "BIH" }); -db.v.save({ _key: "country-botswana", name: "Botswana", type: "country", code: "BWA" }); -db.v.save({ _key: "country-brazil", name: "Brazil", type: "country", code: "BRA" }); -db.v.save({ _key: "country-brunei", name: "Brunei", type: "country", code: "BRN" }); -db.v.save({ _key: "country-bulgaria", name: "Bulgaria", type: "country", code: "BGR" }); -db.v.save({ _key: "country-burkina-faso", name: "Burkina Faso", type: "country", code: "BFA" }); -db.v.save({ _key: "country-burundi", name: "Burundi", type: "country", code: "BDI" }); -db.v.save({ _key: "country-cambodia", name: "Cambodia", type: "country", code: "KHM" }); -db.v.save({ _key: "country-cameroon", name: "Cameroon", type: "country", code: "CMR" }); -db.v.save({ _key: "country-canada", name: "Canada", type: "country", code: "CAN" }); -db.v.save({ _key: "country-chad", name: "Chad", type: "country", code: "TCD" }); -db.v.save({ _key: "country-chile", name: "Chile", type: "country", code: "CHL" }); -db.v.save({ _key: "country-colombia", name: "Colombia", type: "country", code: "COL" }); -db.v.save({ _key: "country-cote-d-ivoire", name: "Cote d'Ivoire", type: "country", code: "CIV" }); -db.v.save({ _key: "country-croatia", name: "Croatia", type: "country", code: "HRV" }); -db.v.save({ _key: "country-czech-republic", name: "Czech Republic", type: "country", code: "CZE" }); -db.v.save({ _key: "country-denmark", name: "Denmark", type: "country", code: "DNK" }); -db.v.save({ _key: "country-ecuador", name: "Ecuador", type: "country", code: "ECU" }); -db.v.save({ _key: "country-egypt", name: "Egypt", type: "country", code: "EGY" }); -db.v.save({ _key: "country-eritrea", name: "Eritrea", type: "country", code: "ERI" }); -db.v.save({ _key: "country-finland", name: "Finland", type: "country", code: "FIN" }); -db.v.save({ _key: "country-france", name: "France", type: "country", code: "FRA" }); -db.v.save({ _key: "country-germany", name: "Germany", type: "country", code: "DEU" }); -db.v.save({ _key: "country-people-s-republic-of-china", name: "People's Republic of China", type: "country", code: "CHN" }); - -// vertices: capitals -db.v.save({ _key: "capital-algiers", name: "Algiers", type: "capital" }); -db.v.save({ _key: "capital-andorra-la-vella", name: "Andorra la Vella", type: "capital" }); -db.v.save({ _key: "capital-asmara", name: "Asmara", type: "capital" }); -db.v.save({ _key: "capital-bandar-seri-begawan", name: "Bandar Seri Begawan", type: "capital" }); -db.v.save({ _key: "capital-beijing", name: "Beijing", type: "capital" }); -db.v.save({ _key: "capital-berlin", name: "Berlin", type: "capital" }); -db.v.save({ _key: "capital-bogota", name: "Bogota", type: "capital" }); -db.v.save({ _key: "capital-brasilia", name: "Brasilia", type: "capital" }); -db.v.save({ _key: "capital-bridgetown", name: "Bridgetown", type: "capital" }); -db.v.save({ _key: "capital-brussels", name: "Brussels", type: "capital" }); -db.v.save({ _key: "capital-buenos-aires", name: "Buenos Aires", type: "capital" }); -db.v.save({ _key: "capital-bujumbura", name: "Bujumbura", type: "capital" }); -db.v.save({ _key: "capital-cairo", name: "Cairo", type: "capital" }); -db.v.save({ _key: "capital-canberra", name: "Canberra", type: "capital" }); -db.v.save({ _key: "capital-copenhagen", name: "Copenhagen", type: "capital" }); -db.v.save({ _key: "capital-dhaka", name: 
"Dhaka", type: "capital" }); -db.v.save({ _key: "capital-gaborone", name: "Gaborone", type: "capital" }); -db.v.save({ _key: "capital-helsinki", name: "Helsinki", type: "capital" }); -db.v.save({ _key: "capital-kabul", name: "Kabul", type: "capital" }); -db.v.save({ _key: "capital-la-paz", name: "La Paz", type: "capital" }); -db.v.save({ _key: "capital-luanda", name: "Luanda", type: "capital" }); -db.v.save({ _key: "capital-manama", name: "Manama", type: "capital" }); -db.v.save({ _key: "capital-nassau", name: "Nassau", type: "capital" }); -db.v.save({ _key: "capital-n-djamena", name: "N'Djamena", type: "capital" }); -db.v.save({ _key: "capital-ottawa", name: "Ottawa", type: "capital" }); -db.v.save({ _key: "capital-ouagadougou", name: "Ouagadougou", type: "capital" }); -db.v.save({ _key: "capital-paris", name: "Paris", type: "capital" }); -db.v.save({ _key: "capital-phnom-penh", name: "Phnom Penh", type: "capital" }); -db.v.save({ _key: "capital-prague", name: "Prague", type: "capital" }); -db.v.save({ _key: "capital-quito", name: "Quito", type: "capital" }); -db.v.save({ _key: "capital-saint-john-s", name: "Saint John's", type: "capital" }); -db.v.save({ _key: "capital-santiago", name: "Santiago", type: "capital" }); -db.v.save({ _key: "capital-sarajevo", name: "Sarajevo", type: "capital" }); -db.v.save({ _key: "capital-sofia", name: "Sofia", type: "capital" }); -db.v.save({ _key: "capital-thimphu", name: "Thimphu", type: "capital" }); -db.v.save({ _key: "capital-tirana", name: "Tirana", type: "capital" }); -db.v.save({ _key: "capital-vienna", name: "Vienna", type: "capital" }); -db.v.save({ _key: "capital-yamoussoukro", name: "Yamoussoukro", type: "capital" }); -db.v.save({ _key: "capital-yaounde", name: "Yaounde", type: "capital" }); -db.v.save({ _key: "capital-zagreb", name: "Zagreb", type: "capital" }); - -// edges: continent -> world -db.e.save("v/continent-africa", "v/world", { type: "is-in" }); -db.e.save("v/continent-asia", "v/world", { type: "is-in" }); -db.e.save("v/continent-australia", "v/world", { type: "is-in" }); -db.e.save("v/continent-europe", "v/world", { type: "is-in" }); -db.e.save("v/continent-north-america", "v/world", { type: "is-in" }); -db.e.save("v/continent-south-america", "v/world", { type: "is-in" }); - -// edges: country -> continent -db.e.save("v/country-afghanistan", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-albania", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-algeria", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-andorra", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-angola", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-antigua-and-barbuda", "v/continent-north-america", { type: "is-in" }); -db.e.save("v/country-argentina", "v/continent-south-america", { type: "is-in" }); -db.e.save("v/country-australia", "v/continent-australia", { type: "is-in" }); -db.e.save("v/country-austria", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-bahamas", "v/continent-north-america", { type: "is-in" }); -db.e.save("v/country-bahrain", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-bangladesh", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-barbados", "v/continent-north-america", { type: "is-in" }); -db.e.save("v/country-belgium", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-bhutan", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-bolivia", "v/continent-south-america", { type: "is-in" }); 
-db.e.save("v/country-bosnia-and-herzegovina", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-botswana", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-brazil", "v/continent-south-america", { type: "is-in" }); -db.e.save("v/country-brunei", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-bulgaria", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-burkina-faso", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-burundi", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-cambodia", "v/continent-asia", { type: "is-in" }); -db.e.save("v/country-cameroon", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-canada", "v/continent-north-america", { type: "is-in" }); -db.e.save("v/country-chad", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-chile", "v/continent-south-america", { type: "is-in" }); -db.e.save("v/country-colombia", "v/continent-south-america", { type: "is-in" }); -db.e.save("v/country-cote-d-ivoire", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-croatia", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-czech-republic", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-denmark", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-ecuador", "v/continent-south-america", { type: "is-in" }); -db.e.save("v/country-egypt", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-eritrea", "v/continent-africa", { type: "is-in" }); -db.e.save("v/country-finland", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-france", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-germany", "v/continent-europe", { type: "is-in" }); -db.e.save("v/country-people-s-republic-of-china", "v/continent-asia", { type: "is-in" }); - -// edges: capital -> country -db.e.save("v/capital-algiers", "v/country-algeria", { type: "is-in" }); -db.e.save("v/capital-andorra-la-vella", "v/country-andorra", { type: "is-in" }); -db.e.save("v/capital-asmara", "v/country-eritrea", { type: "is-in" }); -db.e.save("v/capital-bandar-seri-begawan", "v/country-brunei", { type: "is-in" }); -db.e.save("v/capital-beijing", "v/country-people-s-republic-of-china", { type: "is-in" }); -db.e.save("v/capital-berlin", "v/country-germany", { type: "is-in" }); -db.e.save("v/capital-bogota", "v/country-colombia", { type: "is-in" }); -db.e.save("v/capital-brasilia", "v/country-brazil", { type: "is-in" }); -db.e.save("v/capital-bridgetown", "v/country-barbados", { type: "is-in" }); -db.e.save("v/capital-brussels", "v/country-belgium", { type: "is-in" }); -db.e.save("v/capital-buenos-aires", "v/country-argentina", { type: "is-in" }); -db.e.save("v/capital-bujumbura", "v/country-burundi", { type: "is-in" }); -db.e.save("v/capital-cairo", "v/country-egypt", { type: "is-in" }); -db.e.save("v/capital-canberra", "v/country-australia", { type: "is-in" }); -db.e.save("v/capital-copenhagen", "v/country-denmark", { type: "is-in" }); -db.e.save("v/capital-dhaka", "v/country-bangladesh", { type: "is-in" }); -db.e.save("v/capital-gaborone", "v/country-botswana", { type: "is-in" }); -db.e.save("v/capital-helsinki", "v/country-finland", { type: "is-in" }); -db.e.save("v/capital-kabul", "v/country-afghanistan", { type: "is-in" }); -db.e.save("v/capital-la-paz", "v/country-bolivia", { type: "is-in" }); -db.e.save("v/capital-luanda", "v/country-angola", { type: "is-in" }); -db.e.save("v/capital-manama", "v/country-bahrain", { type: "is-in" }); 
-db.e.save("v/capital-nassau", "v/country-bahamas", { type: "is-in" }); -db.e.save("v/capital-n-djamena", "v/country-chad", { type: "is-in" }); -db.e.save("v/capital-ottawa", "v/country-canada", { type: "is-in" }); -db.e.save("v/capital-ouagadougou", "v/country-burkina-faso", { type: "is-in" }); -db.e.save("v/capital-paris", "v/country-france", { type: "is-in" }); -db.e.save("v/capital-phnom-penh", "v/country-cambodia", { type: "is-in" }); -db.e.save("v/capital-prague", "v/country-czech-republic", { type: "is-in" }); -db.e.save("v/capital-quito", "v/country-ecuador", { type: "is-in" }); -db.e.save("v/capital-saint-john-s", "v/country-antigua-and-barbuda", { type: "is-in" }); -db.e.save("v/capital-santiago", "v/country-chile", { type: "is-in" }); -db.e.save("v/capital-sarajevo", "v/country-bosnia-and-herzegovina", { type: "is-in" }); -db.e.save("v/capital-sofia", "v/country-bulgaria", { type: "is-in" }); -db.e.save("v/capital-thimphu", "v/country-bhutan", { type: "is-in" }); -db.e.save("v/capital-tirana", "v/country-albania", { type: "is-in" }); -db.e.save("v/capital-vienna", "v/country-austria", { type: "is-in" }); -db.e.save("v/capital-yamoussoukro", "v/country-cote-d-ivoire", { type: "is-in" }); -db.e.save("v/capital-yaounde", "v/country-cameroon", { type: "is-in" }); -db.e.save("v/capital-zagreb", "v/country-croatia", { type: "is-in" }); -``` diff --git a/Documentation/Books/Manual/Graphs/Traversals/README.md b/Documentation/Books/Manual/Graphs/Traversals/README.md deleted file mode 100644 index a859f8396773..000000000000 --- a/Documentation/Books/Manual/Graphs/Traversals/README.md +++ /dev/null @@ -1,62 +0,0 @@ -Traversals -========== - -ArangoDB provides [several ways to query graph data](../README.md). -Very simple operations can be composed with the low-level edge methods *edges*, *inEdges*, and *outEdges* for -[edge collections](../Edges/README.md). These work on named and anonymous graphs. For more complex operations, -ArangoDB provides predefined traversal objects. - -Traversals have also been added to AQL. -Please read the [chapter about AQL traversals](../../../AQL/Graphs/Traversals.html) before you continue reading here. -Most of the traversal cases are covered by AQL and will be executed in an optimized way. -Only if the logic for your is too complex to be defined using AQL filters you can use the traversal object defined -here which gives you complete programmatic access to the data. - -For any of the following examples, we'll be using the example collections *v* and *e*, -populated with continents, countries and capitals data listed below (see [Example Data](ExampleData.md)). - -To learn more about graphs in ArangoDB take the free -[ArangoDB Graph Course](https://www.arangodb.com/arangodb-graph-course). - -Starting from Scratch ---------------------- - -ArangoDB provides the *edges*, *inEdges*, and *outEdges* methods for edge collections. -These methods can be used to quickly determine if a vertex is connected to other vertices, -and which. -This functionality can be exploited to write very simple graph queries in JavaScript. - -For example, to determine which edges are linked to the *world* vertex, we can use *inEdges*: - -```js -db.e.inEdges('v/world').forEach(function(edge) { - require("@arangodb").print(edge._from, "->", edge.type, "->", edge._to); -}); -``` - -*inEdges* will give us all ingoing edges for the specified vertex *v/world*. 
The result -is a JavaScript array that we can iterate over and print the results: - -```js -v/continent-africa -> is-in -> v/world -v/continent-south-america -> is-in -> v/world -v/continent-asia -> is-in -> v/world -v/continent-australia -> is-in -> v/world -v/continent-europe -> is-in -> v/world -v/continent-north-america -> is-in -> v/world -``` - -**Note**: *edges*, *inEdges*, and *outEdges* return an array of edges. If we want to retrieve -the linked vertices, we can use each edges' *_from* and *_to* attributes as follows: - -```js -db.e.inEdges('v/world').forEach(function(edge) { - require("@arangodb").print(db._document(edge._from).name, "->", edge.type, "->", db._document(edge._to).name); -}); -``` - -We are using the *document* method from the *db* object to retrieve the connected vertices now. - -While this may be sufficient for one-level graph operations, writing a traversal by hand -may become too complex for multi-level traversals. - diff --git a/Documentation/Books/Manual/Graphs/Traversals/UsingTraversalObjects.md b/Documentation/Books/Manual/Graphs/Traversals/UsingTraversalObjects.md deleted file mode 100644 index cd7172dd34d0..000000000000 --- a/Documentation/Books/Manual/Graphs/Traversals/UsingTraversalObjects.md +++ /dev/null @@ -1,840 +0,0 @@ -Using Traversal Objects -======================= - -{% hint 'warning' %} -The JavaScript module `@arangodb/graph/traversal` (*traversal module* for short) -is deprecated from version 3.4.0 on. The preferred way to traverse graphs is with AQL. -{% endhint %} - -To use a traversal object, we first need to require the *traversal* module: - - -```js -var traversal = require("@arangodb/graph/traversal"); -var examples = require("@arangodb/graph-examples/example-graph.js"); -examples.loadGraph("worldCountry"); -``` - -We then need to setup a configuration for the traversal and determine at which vertex to -start the traversal: - -```js -var config = { - datasource: traversal.generalGraphDatasourceFactory("worldCountry"), - strategy: "depthfirst", - order: "preorder", - filter: traversal.visitAllFilter, - expander: traversal.inboundExpander, - maxDepth: 1 -}; - -var startVertex = db._document("v/world"); -``` - -**Note**: The startVertex needs to be a document, not only a document id. - -We can then create a traverser and start the traversal by calling its *traverse* method. -Note that *traverse* needs a *result* object, which it can modify in place: - -```js -var result = { - visited: { - vertices: [ ], - paths: [ ] - } -}; -var traverser = new traversal.Traverser(config); -traverser.traverse(result, startVertex); -``` - -Finally, we can print the contents of the *results* object, limited to the visited vertices. 
-We will only print the name and type of each visited vertex for brevity: - -```js -require("@arangodb").print(result.visited.vertices.map(function(vertex) { - return vertex.name + " (" + vertex.type + ")"; -})); -``` - - -The full script, which includes all steps carried out so far is thus: - -```js -var traversal = require("@arangodb/graph/traversal"); - -var config = { - datasource: traversal.generalGraphDatasourceFactory("worldCountry"), - strategy: "depthfirst", - order: "preorder", - filter: traversal.visitAllFilter, - expander: traversal.inboundExpander, - maxDepth: 1 -}; - -var startVertex = db._document("v/world"); -var result = { - visited: { - vertices: [ ], - paths: [ ] - } -}; - -var traverser = new traversal.Traverser(config); -traverser.traverse(result, startVertex); - -require("@arangodb").print(result.visited.vertices.map(function(vertex) { - return vertex.name + " (" + vertex.type + ")"; -})); -``` - -The result is an array of vertices that were visited during the traversal, starting at the -start vertex (i.e. *v/world* in our example): - -```js -[ - "World (root)", - "Africa (continent)", - "Asia (continent)", - "Australia (continent)", - "Europe (continent)", - "North America (continent)", - "South America (continent)" -] -``` - -**Note**: The result is limited to vertices directly connected to the start vertex. We -achieved this by setting the *maxDepth* attribute to *1*. Not setting it would return the -full array of vertices. - -Traversal Direction -------------------- - -For the examples contained in this manual, we'll be starting the traversals at vertex -*v/world*. Vertices in our graph are connected like this: - -```js -v/world <- is-in <- continent (Africa) <- is-in <- country (Algeria) <- is-in <- capital (Algiers) -``` - -To get any meaningful results, we must traverse the graph in **inbound** order. This means, -we'll be following all incoming edges of to a vertex. In the traversal configuration, we -have specified this via the *expander* attribute: - -```js -var config = { - ... - expander: traversal.inboundExpander -}; -``` - -For other graphs, we might want to traverse via the **outgoing** edges. For this, we can -use the *outboundExpander*. There is also an *anyExpander*, which will follow both outgoing -and incoming edges. This should be used with care and the traversal should always be -limited to a maximum number of iterations (e.g. using the *maxIterations* attribute) in -order to terminate at some point. - -To invoke the default outbound expander for a graph, simply use the predefined function: - -```js -var config = { - ... - expander: traversal.outboundExpander -}; -``` - -Please note the outbound expander will not produce any output for the examples if we still -start the traversal at the *v/world* vertex. - -Still, we can use the outbound expander if we start somewhere else in the graph, e.g. 
- -```js -var traversal = require("@arangodb/graph/traversal"); - -var config = { - datasource: traversal.generalGraphDatasourceFactory("world_graph"), - strategy: "depthfirst", - order: "preorder", - filter: traversal.visitAllFilter, - expander: traversal.outboundExpander -}; - -var startVertex = db._document("v/capital-algiers"); -var result = { - visited: { - vertices: [ ], - paths: [ ] - } -}; - -var traverser = new traversal.Traverser(config); -traverser.traverse(result, startVertex); - -require("@arangodb").print(result.visited.vertices.map(function(vertex) { - return vertex.name + " (" + vertex.type + ")"; -})); -``` - -The result is: - -```js -[ - "Algiers (capital)", - "Algeria (country)", - "Africa (continent)", - "World (root)" -] -``` - -which confirms that now we're going outbound. - -Traversal Strategy ------------------- - -### Depth-first traversals - -The visitation order of vertices is determined by the *strategy* and *order* attributes set -in the configuration. We chose *depthfirst* and *preorder*, meaning the traverser will -visit each vertex **before** handling connected edges (pre-order), and descend into any -connected edges before processing other vertices on the same level (depth-first). - -Let's remove the *maxDepth* attribute now. We'll now be getting all vertices (directly -and indirectly connected to the start vertex): - -```js -var config = { - datasource: traversal.generalGraphDatasourceFactory("world_graph"), - strategy: "depthfirst", - order: "preorder", - filter: traversal.visitAllFilter, - expander: traversal.inboundExpander -}; - -var result = { - visited: { - vertices: [ ], - paths: [ ] - } -}; - -var traverser = new traversal.Traverser(config); -traverser.traverse(result, startVertex); - -require("@arangodb").print(result.visited.vertices.map(function(vertex) { - return vertex.name + " (" + vertex.type + ")"; -})); -``` - -The result will be a longer array, assembled in depth-first, pre-order order. For -each continent found, the traverser will descend into linked countries, and then into -the linked capital: - -```js -[ - "World (root)", - "Africa (continent)", - "Algeria (country)", - "Algiers (capital)", - "Angola (country)", - "Luanda (capital)", - "Botswana (country)", - "Gaborone (capital)", - "Burkina Faso (country)", - "Ouagadougou (capital)", - ... -] -``` - -Let's switch the *order* attribute from *preorder* to *postorder*. This will make the -traverser visit vertices **after** all connected vertices were visited (i.e. most distant -vertices will be emitted first): - -```js -[ - "Algiers (capital)", - "Algeria (country)", - "Luanda (capital)", - "Angola (country)", - "Gaborone (capital)", - "Botswana (country)", - "Ouagadougou (capital)", - "Burkina Faso (country)", - "Bujumbura (capital)", - "Burundi (country)", - "Yaounde (capital)", - "Cameroon (country)", - "N'Djamena (capital)", - "Chad (country)", - "Yamoussoukro (capital)", - "Cote d'Ivoire (country)", - "Cairo (capital)", - "Egypt (country)", - "Asmara (capital)", - "Eritrea (country)", - "Africa (continent)", - ... 
-] -``` - -### Breadth-first traversals - -If we go back to *preorder*, but change the strategy to *breadth-first* and re-run the -traversal, we'll see that the return order changes, and items on the same level will be -returned adjacently: - -```js -[ - "World (root)", - "Africa (continent)", - "Asia (continent)", - "Australia (continent)", - "Europe (continent)", - "North America (continent)", - "South America (continent)", - "Burkina Faso (country)", - "Burundi (country)", - "Cameroon (country)", - "Chad (country)", - "Algeria (country)", - "Angola (country)", - ... -] -``` - -**Note**: The order of items returned for the same level is undefined. -This is because there is no natural order of edges for a vertex with -multiple connected edges. To explicitly set the order for edges on the -same level, you can specify an edge comparator function with the *sort* -attribute: - -```js -var config = { - ... - sort: function (l, r) { return l._key < r._key ? 1 : -1; } - ... -}; -``` - -The arguments l and r are edge documents. -This will traverse edges of the same vertex in backward *_key* order: - -```js -[ - "World (root)", - "South America (continent)", - "North America (continent)", - "Europe (continent)", - "Australia (continent)", - "Asia (continent)", - "Africa (continent)", - "Ecuador (country)", - "Colombia (country)", - "Chile (country)", - "Brazil (country)", - "Bolivia (country)", - "Argentina (country)", - ... -] -``` - -**Note**: This attribute only works for the usual expanders -*traversal.inboundExpander*, *traversal.outboundExpander*, -*traversal.anyExpander* and their corresponding "WithLabels" variants. -If you are using custom expanders -you have to organize the sorting within the specified expander. - -### Writing Custom Visitors - -So far we have used much of the traverser's default functions. The traverser is very -configurable and many of the default functions can be overridden with custom functionality. - -For example, we have been using the default visitor function (which is always used if -the configuration does not contain the *visitor* attribute). The default visitor function -is called for each vertex in a traversal, and will push it into the result. -This is the reason why the *result* variable looked different after the traversal, and -needed to be initialized before the traversal was started. - -Note that the default visitor (named `trackingVisitor`) will add every visited vertex -into the result, including the full paths from the start vertex. This is useful for learning and -debugging purposes, but should be avoided in production because it might produce (and -copy) huge amounts of data. Instead, only those data should be copied into the result -that are actually necessary. - -The traverser comes with the following predefined visitors: -- *trackingVisitor*: this is the default visitor. It will copy all data of each visited - vertex plus the full path information into the result. This can be slow if the result - set is huge or vertices contain a lot of data. -- *countingVisitor*: this is a very lightweight visitor: all it does is increase a - counter in the result for each vertex visited. Vertex data and paths will not be copied - into the result. -- *doNothingVisitor*: if no action shall be carried out when a vertex is visited, this - visitor can be employed. It will not do anything and will thus be fast. It can be used - for performance comparisons with other visitors. - -We can also write our own visitor function if we want to. 
The general function signature for -visitor functions is as follows: - -```js -var config = { - ... - visitor: function (config, result, vertex, path, connected) { ... } -}; -``` - -Note: the *connected* parameter value will only be set if the traversal order is -set to *preorder-expander*. Otherwise, this parameter won't be set by the traverser. - -Visitor functions are not expected to return any values. Instead, they can modify the -*result* variable (e.g. by pushing the current vertex into it), or do anything else. -For example, we can create a simple visitor function that only prints information about -the current vertex as we traverse: - -```js -var config = { - datasource: traversal.generalGraphDatasourceFactory("world_graph"), - strategy: "depthfirst", - order: "preorder", - filter: traversal.visitAllFilter, - expander: traversal.inboundExpander, - visitor: function (config, result, vertex, path) { - require("@arangodb").print("visiting vertex", vertex.name); - } -}; - -var traverser = new traversal.Traverser(config); -traverser.traverse(undefined, startVertex); -``` - -To write a visitor that increments a counter each time a vertex is visited, -we could write the following custom visitor: - -```js -config.visitor = function (config, result, vertex, path, connected) { - if (! result) { - result = { }; - } - - if (! result.hasOwnProperty('count')) { - result.count = 0; - } - - ++result.count; -} -``` - -Note that such visitor is already predefined (it's the countingVisitor described -above). It can be used as follows: -```js -config.visitor = traversal.countingVisitor; -``` - -Another example of a visitor is one that collects the `_id` values of all vertices -visited: - -```js -config.visitor = function (config, result, vertex, path, connected) { - if (! result) { - result = { }; - } - if (! result.hasOwnProperty("visited")) { - result.visited = { vertices: [ ] }; - } - - result.visited.vertices.push(vertex._id); -} -``` - -When the traversal order is set to *preorder-expander*, the traverser will pass -a fifth parameter value into the visitor function. This parameter contains the -connected edges of the visited vertex as an array. This can be handy because in this -case the visitor will get all information about the vertex and the connected edges -together. - -For example, the following visitor can be used to print only leaf nodes (that -do not have any further connected edges): - -```js -config.visitor = function (config, result, vertex, path, connected) { - if (connected && connected.length === 0) { - require("@arangodb").print("found a leaf-node: ", vertex); - } -} -``` - -Note that for this visitor to work, the traversal *order* attribute needs to be -set to the value *preorder-expander*. - -Filtering Vertices and Edges ----------------------------- - -### Filtering Vertices - -So far we have printed or returned all vertices that were visited during the traversal. -This is not always required. If the result shall be restrict to just specific vertices, -we can use a filter function for vertices. It can be defined by setting the *filter* -attribute of a traversal configuration, e.g.: - -```js -var config = { - filter: function (config, vertex, path) { - if (vertex.type !== 'capital') { - return 'exclude'; - } - } -} -``` - -The above filter function will exclude all vertices that do not have a *type* value of -*capital*. The filter function will be called for each vertex found during the traversal. 
-It will receive the traversal configuration, the current vertex, and the full path from -the traversal start vertex to the current vertex. The path consists of an array of edges, -and an array of vertices. We could also filter everything but capitals by checking the -length of the path from the start vertex to the current vertex. Capitals will have a -distance of 3 from the *v/world* start vertex -(capital → is-in → country → is-in → continent → is-in → world): - -```js -var config = { - ... - filter: function (config, vertex, path) { - if (path.edges.length < 3) { - return 'exclude'; - } - } -} -``` - -**Note**: If a filter function returns nothing (or *undefined*), the current vertex -will be included, and all connected edges will be followed. If a filter function -returns *exclude* the current vertex will be excluded from the result, and all still -all connected edges will be followed. If a filter function returns *prune*, the -current vertex will be included, but no connected edges will be followed. - -For example, the following filter function will not descend into connected edges of -continents, limiting the depth of the traversal. Still, continent vertices will be -included in the result: - -```js -var config = { - ... - filter: function (config, vertex, path) { - if (vertex.type === 'continent') { - return 'prune'; - } - } -} -``` - -It is also possible to combine *exclude* and *prune* by returning an array with both -values: - -```js -return [ 'exclude', 'prune' ]; -``` - -Filtering Edges ---------------- - -It is possible to exclude certain edges from the traversal. To filter on edges, a -filter function can be defined via the *expandFilter* attribute. The *expandFilter* -is a function which is called for each edge during a traversal. - -It will receive the current edge (*edge* variable) and the vertex which the edge -connects to (in the direction of the traversal). It also receives the current path -from the start vertex up to the current vertex (excluding the current edge and the -vertex the edge points to). - -If the function returns *true*, the edge will be followed. If the function returns -*false*, the edge will not be followed. -Here is a very simple custom edge filter function implementation, which simply -includes edges if the (edges) path length is less than 1, and will exclude any -other edges. This will effectively terminate the traversal after the first level -of edges: - -```js -var config = { - ... - expandFilter: function (config, vertex, edge, path) { - return (path.edges.length < 1); - } -}; -``` - -Writing Custom Expanders ------------------------- - -The edges connected to a vertex are determined by the expander. So far we have used a -default expander (the default inbound expander to be precise). The default inbound -expander simply enumerates all connected ingoing edges for a vertex, based on the -[edge collection](../../Appendix/Glossary.md#edge-collection) specified in the traversal configuration. - -There is also a default outbound expander, which will enumerate all connected outgoing -edges. Finally, there is an any expander, which will follow both ingoing and outgoing -edges. - -If connected edges must be determined in some different fashion for whatever reason, a -custom expander can be written and registered by setting the *expander* attribute of the -configuration. The expander function signature is as follows: - -```js -var config = { - ... - expander: function (config, vertex, path) { ... 
} -} -``` - -It is the expander's responsibility to return all edges and vertices directly -connected to the current vertex (which is passed via the *vertex* variable). -The full path from the start vertex up to the current vertex is also supplied via -the *path* variable. -An expander is expected to return an array of objects, which need to have an *edge* -and a *vertex* attribute each. - -**Note**: If you want to rely on a particular order in which the edges -are traversed, you have to sort the edges returned by your expander -within the code of the expander. The functions to get outbound, inbound -or any edges from a vertex do not guarantee any particular order! - -A custom implementation of an inbound expander could look like this (this is a -non-deterministic expander, which randomly decides whether or not to include -connected edges): - -```js -var config = { - ... - expander: function (config, vertex, path) { - var connected = [ ]; - var datasource = config.datasource; - datasource.getInEdges(vertex._id).forEach(function (edge) { - if (Math.random() >= 0.5) { - connected.push({ edge: edge, vertex: (edge._from) }); - } - }); - return connected; - } -}; -``` - -A custom expander can also be used as an edge filter because it has full control -over which edges will be returned. - -Following are two examples of custom expanders that pick edges based on attributes -of the edges and the connected vertices. - -Finding the connected edges / vertices based on an attribute *when* in the -connected vertices. The goal is to follow the edge that leads to the vertex -with the highest value in the *when* attribute: - -```js -var config = { - ... - expander: function (config, vertex, path) { - var datasource = config.datasource; - // determine all outgoing edges - var outEdges = datasource.getOutEdges(vertex); - - if (outEdges.length === 0) { - return [ ]; - } - - var data = [ ]; - outEdges.forEach(function (edge) { - data.push({ edge: edge, vertex: datasource.getInVertex(edge) }); - }); - - // sort outgoing vertices according to "when" attribute value - data.sort(function (l, r) { - if (l.vertex.when === r.vertex.when) { - return 0; - } - - return (l.vertex.when < r.vertex.when ? 1 : -1); - }); - - // pick first vertex found (with highest "when" attribute value) - return [ data[0] ]; - } - ... -}; -``` - -Finding the connected edges / vertices based on an attribute *when* in the -edge itself. The goal is to pick the one edge (out of potentially many) that -has the highest *when* attribute value: - -```js -var config = { - ... - expander: function (config, vertex, path) { - var datasource = config.datasource; - // determine all outgoing edges - var outEdges = datasource.getOutEdges(vertex); - - if (outEdges.length === 0) { - return [ ]; // return an empty array - } - - // sort all outgoing edges according to "when" attribute - outEdges.sort(function (l, r) { - if (l.when === r.when) { - return 0; - } - return (l.when < r.when ? -1 : 1); - }); - - // return first edge (the one with highest "when" value) - var edge = outEdges[0]; - try { - var v = datasource.getInVertex(edge); - return [ { edge: edge, vertex: v } ]; - } - catch (e) { } - - return [ ]; - } - ... -}; -``` - - -Handling Uniqueness -------------------- - -Graphs may contain cycles. To be on top of what happens when a traversal encounters a vertex -or an edge it has already visited, there are configuration options. - -The default configuration is to visit every vertex, regardless of whether it was already visited -in the same traversal. 
However, edges will by default only be followed if they are not already
present in the current path.

Imagine the following graph which contains a cycle:

```
A -> B -> C -> A
```

When the traversal finds the edge from *C* to *A*, it will by default follow it. This is because
we have not seen this edge yet. It will also visit vertex *A* again. This is because by default
all vertices will be visited, regardless of whether they were already visited or not.

However, the traversal will not follow the outgoing edge from *A* to *B* again. This is because
we already have the edge from *A* to *B* in our current path.

These default settings will prevent infinite traversals.

To adjust the uniqueness for visiting vertices, there are the following options for *uniqueness.vertices*:

* *"none"*: always visit a vertex, regardless of whether it was already visited or not
* *"global"*: visit a vertex only if it was not yet visited during the traversal
* *"path"*: visit a vertex if it is not included in the current path

To adjust the uniqueness for following edges, there are the following options for *uniqueness.edges*:

* *"none"*: always follow an edge, regardless of whether it was followed before
* *"global"*: follow an edge only if it wasn't followed during the traversal
* *"path"*: follow an edge if it is not included in the current path

Note that uniqueness checking will have some effect on both runtime and memory usage. For example, when
uniqueness checks are set to *"global"*, arrays of visited vertices and edges must be kept in memory while the
traversal is executed. Global uniqueness should thus only be used when a traversal is expected to visit
few nodes.

In terms of runtime, turning off uniqueness checks (by setting both options to *"none"*) is the best choice,
but it is only safe for graphs that do not contain cycles. When uniqueness checks are deactivated in a graph
with cycles, the traversal might not abort in a sensible amount of time.


Optimizations
-------------

There are a few options for making a traversal run faster.

The best option is to keep the number of visited vertices and followed edges as small as possible. This can
be achieved by writing custom filter and expander functions. Such functions should only include vertices of
interest, and only follow edges that might be interesting.

Traversal depth can also be bounded with the *minDepth* and *maxDepth* options.

Another way to speed up traversals is to write a custom visitor function. The default visitor function
(*trackingVisitor*) will copy every visited vertex into the result. If vertices contain lots of data,
this might be expensive. It is therefore recommended to copy only the data into the result that is actually
needed. The default visitor function will also copy the full path to the visited document into the result.
This is even more expensive and should be avoided if possible.

If the goal of a traversal is to only count the number of visited vertices, the predefined *countingVisitor*
will be much more efficient than the default visitor.

For graphs that are known to not contain any cycles, uniqueness checks should be turned off. This can be achieved
via the *uniqueness* configuration options. Note that uniqueness checks should not be turned off for graphs
that are known to contain cycles or if there is no information about the graph's structure.

By default, a traversal will only process a limited number of vertices.
This is to protect the user from
unintentionally running a never-ending traversal on a graph with cyclic data. How many vertices will be processed
at most is determined by the *maxIterations* configuration option. If a traversal hits the cap specified
by *maxIterations*, it will abort and throw a *too many iterations* exception. If this error is encountered,
the *maxIterations* value should only be increased after making sure that the other traversal configuration
parameters are sane and that the traversal will abort naturally at some point.

Finally, the *buildVertices* configuration option can be set to *false* to avoid looking up and fully constructing
vertex data. If all that's needed from vertices are the *_id* or *_key* attributes, the *buildVertices*
option can be set to *false*. If visitor, filter or expandFilter functions need to access other vertex
attributes, the option should not be changed.


Configuration Overview
----------------------

This section summarizes the configuration attributes for the traversal object. The
configuration can consist of the following attributes:

- *visitor*: visitor function for vertices. It will be called for all non-excluded vertices. The
  general visitor function signature is *function (config, result, vertex, path)*. If the traversal
  order is *preorder-expander*, the connecting edges of the visited vertex will be passed as the
  fifth parameter, extending the function signature to: *function (config, result, vertex, path, edges)*.

  Visitor functions are not expected to return values, but they may modify the *result* variable as
  needed (e.g. by pushing vertex data into the result).
- *expander*: expander function that is responsible for returning edges and vertices
  directly connected to a vertex. The function signature is *function (config, vertex, path)*.
  The expander function is required to return an array of connection objects, consisting of an
  *edge* and *vertex* attribute each. If there are no connecting edges, the expander is expected to
  return an empty array.
- *filter*: vertex filter function. The function signature is *function (config, vertex, path)*. It
  may return one of the following values:
  - *undefined*: vertex will be included in the result and connected edges will be traversed
  - *"exclude"*: vertex will not be included in the result and connected edges will be traversed
  - *"prune"*: vertex will be included in the result but connected edges will not be traversed
  - [ *"prune"*, *"exclude"* ]: vertex will not be included in the result and connected edges will not
    be followed
- *expandFilter*: filter function applied on each edge/vertex combination determined by the expander.
  The function signature is *function (config, vertex, edge, path)*. The function should return
  *true* if the edge/vertex combination should be processed, and *false* if it should be ignored.
- *sort*: a comparator function to determine the order in which connected edges are processed. The
  function signature is *function (l, r)*. The function is required to return one of the following
  values:
  - *-1* if *l* should have a sort value less than *r*
  - *1* if *l* should have a higher sort value than *r*
  - *0* if *l* and *r* have the same sort value
- *strategy*: determines the visitation strategy. Possible values are *depthfirst* and *breadthfirst*.
- *order*: determines the visitation order. Possible values are *preorder*, *postorder*, and
  *preorder-expander*. The *preorder-expander* order is the same as *preorder*, except that the signature of
  the *visitor* function will change as described above.
- *itemOrder*: determines the order in which connections returned by the expander will be processed.
  Possible values are *forward* and *backward*.
- *maxDepth*: if set to a value greater than *0*, this will limit the traversal to this maximum depth.
- *minDepth*: if set to a value greater than *0*, all vertices found on a level below the *minDepth*
  level will not be included in the result.
- *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is
  sensible to set this number so unbounded traversals will terminate at some point.
- *uniqueness*: an object that defines how repeated visitations of vertices should be handled.
  The *uniqueness* object can have a sub-attribute *vertices*, and a sub-attribute *edges*. Each
  sub-attribute can have one of the following values:
  - *"none"*: no uniqueness constraints
  - *"path"*: element is excluded if it is already contained in the current path. This setting may be
    sensible for graphs that contain cycles (e.g. A → B → C → A).
  - *"global"*: element is excluded if it was already found/visited at any point during the traversal.
- *buildVertices*: this attribute controls whether vertices encountered during the traversal will be
  looked up in the database and made available to visitor, filter, and expandFilter functions.
  By default, vertices will be looked up and made available. However, there are some special use
  cases when fully constructing vertex objects is not necessary and can be avoided. For example, if
  a traversal is meant to only count the number of visited vertices but not read any data from
  the vertices, this option can be set to *false*.
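
To make the overview above more concrete, here is a minimal sketch that combines several of
these attributes into one configuration. It is an illustration only: it assumes the traversal
module is loaded from `@arangodb/graph/traversal` and that the `world_graph` graph and the
`v/world` start vertex from the earlier examples exist.

```js
// minimal sketch, assuming the example graph "world_graph" and the
// start vertex "v/world" from the earlier examples exist
var traversal = require("@arangodb/graph/traversal");
var db = require("@arangodb").db;

var config = {
  datasource: traversal.generalGraphDatasourceFactory("world_graph"),
  strategy: "depthfirst",
  order: "preorder",
  itemOrder: "forward",
  expander: traversal.inboundExpander,
  uniqueness: {
    vertices: "global",   // visit each vertex at most once
    edges: "path"         // follow an edge only if it is not yet on the current path
  },
  maxIterations: 100000,  // safety cap against runaway traversals
  filter: function (config, vertex, path) {
    if (vertex.type === 'continent') {
      return 'prune';     // include continents, but do not descend any further
    }
  },
  visitor: traversal.countingVisitor
};

var result = { };
var traverser = new traversal.Traverser(config);
traverser.traverse(result, db._document("v/world"));

require("@arangodb").print("visited", result.count, "vertices");
```

Because the filter prunes at continent level, this particular configuration only counts the
world vertex and the continents; removing the filter would count the full graph.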
diff --git a/Documentation/Books/Manual/Graphs/cities_graph.png b/Documentation/Books/Manual/Graphs/cities_graph.png deleted file mode 100644 index 16c6771de32b..000000000000 Binary files a/Documentation/Books/Manual/Graphs/cities_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/graph_user_in_group.ditaa b/Documentation/Books/Manual/Graphs/graph_user_in_group.ditaa deleted file mode 100644 index 8a3562a934cd..000000000000 --- a/Documentation/Books/Manual/Graphs/graph_user_in_group.ditaa +++ /dev/null @@ -1,6 +0,0 @@ - - Users UsersInGroups Groups - - +----------+ +----------------------+ - | John +---------------->| BowlingGroupHappyPin | - +----------+ +----------------------+ diff --git a/Documentation/Books/Manual/Graphs/knows_graph.png b/Documentation/Books/Manual/Graphs/knows_graph.png deleted file mode 100644 index ad94c51ce4af..000000000000 Binary files a/Documentation/Books/Manual/Graphs/knows_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/mps_graph.png b/Documentation/Books/Manual/Graphs/mps_graph.png deleted file mode 100644 index f7bb0405ebc0..000000000000 Binary files a/Documentation/Books/Manual/Graphs/mps_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/social_graph.png b/Documentation/Books/Manual/Graphs/social_graph.png deleted file mode 100644 index 7bf6d0bf64ce..000000000000 Binary files a/Documentation/Books/Manual/Graphs/social_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/traversal_graph.png b/Documentation/Books/Manual/Graphs/traversal_graph.png deleted file mode 100644 index 1a0da00f2052..000000000000 Binary files a/Documentation/Books/Manual/Graphs/traversal_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Graphs/world_graph.png b/Documentation/Books/Manual/Graphs/world_graph.png deleted file mode 100644 index d39c37d06e01..000000000000 Binary files a/Documentation/Books/Manual/Graphs/world_graph.png and /dev/null differ diff --git a/Documentation/Books/Manual/Highlights.md b/Documentation/Books/Manual/Highlights.md deleted file mode 100644 index 8589d5e96474..000000000000 --- a/Documentation/Books/Manual/Highlights.md +++ /dev/null @@ -1,160 +0,0 @@ -Highlights -========== - -Version 3.4 ------------ - -**All Editions** - -- [**ArangoSearch**](Views/ArangoSearch/README.md): - Search and similarity ranking engine integrated natively into ArangoDB and - AQL. ArangoSearch combines Boolean retrieval capabilities with generalized - ranking algorithms (BM25, TFDIF). Support of e.g. relevance-based searching, - phrase and prefix-matching, complex boolean searches and query time relevance - tuning. Search can be combined with all supported data models in a single - query. Many specialized language analyzers are already included for e.g. - English, German, French, Chinese, Spanish and many other language. - -- [**GeoJSON Support**](../AQL/Functions/Geo.html) and - [**S2 Geo Index**](Indexing/Geo.md): ArangoDB now supports all geo primitives. - (Multi-)Point, (Multi-)LineStrings, (Multi-)Polygons or intersections can be - defined and queried for. The Google S2 geo index is optimized for RocksDB and - enables efficient querying. Geo query results are automatically visualized - with an OpenStreetMap integration within the Query Editor of the web interface. 
- -- [**Query Profiler**](../AQL/ExecutionAndPerformance/QueryProfiler.html): - Enables the analysis of queries and adds additional information for the user - to identify optimization potentials more easily. The profiler can be accessed - via Arangosh with `db._profileQuery(...)` or via the *Profile* button in the - Query Editor of the web interface. - -- [**Streaming Cursors**](../AQL/Invocation/WithArangosh.html#setting-options): - Cursors requested with the stream option on make queries calculate results - on the fly and make them available for the client in a streaming fashion, - as soon as possible. - -- **RocksDB as Default Storage Engine**: With ArangoDB 3.4 the default - [storage engine](Architecture/StorageEngines.md) for fresh installations will - switch from MMFiles to RocksDB. Many optimizations have been made to RocksDB - since the first release in 3.2. For 3.4 we optimized the binary storage - format for improved insertion, implemented "optional caching", reduced the - replication catch-up time and much more. - -Also see [What's New in 3.4](ReleaseNotes/NewFeatures34.md). - -Version 3.3 ------------ - -**Enterprise Edition** - -- [**Datacenter to Datacenter Replication**](Deployment/DC2DC/README.md): - Replicate the entire structure and content of an ArangoDB cluster - asynchronously to another cluster in a different datacenter with ArangoSync. - Multi-datacenter support means you can fallback to a replica of your cluster - in case of a disaster in one datacenter. - -- [**Encrypted Backups**](Programs/Arangodump/Examples.md#encryption): - Arangodump can create backups encrypted with a secret key using AES256 - block cipher. - -**All Editions** - -- [**Server-level Replication**](Administration/MasterSlave/ServerLevelSetup.md): - In addition to per-database replication, there is now an additional - `globalApplier`. Start the global replication on the slave once and all - current and future databases will be replicated from the master to the - slave automatically. - -- [**Asynchronous Failover**](ReleaseNotes/NewFeatures33.md#asynchronous-failover): - Make a single server instance resilient with a second server instance, one - as master and the other as asynchronously replicating slave, with automatic - failover to the slave if the master goes down. - -Also see [What's New in 3.3](ReleaseNotes/NewFeatures33.md). - -Version 3.2 ------------ - -**All Editions** - -- [**RocksDB Storage Engine**](Architecture/StorageEngines.md): You can now use - as much data in ArangoDB as you can fit on your disk. Plus, you can enjoy - performance boosts on writes by having only document-level locks. - -- [**Pregel**](Graphs/Pregel/README.md): - We implemented distributed graph processing with Pregel to discover hidden - patterns, identify communities and perform in-depth analytics of large graph - data sets. - -- [**Fault-Tolerant Foxx**](../HTTP/Foxx/index.html): The Foxx management - internals have been rewritten from the ground up to make sure - multi-coordinator cluster setups always keep their services in sync and - new coordinators are fully initialized even when all existing coordinators - are unavailable. - -**Enterprise Edition** - -- [**LDAP integration**](Programs/Arangod/Ldap.md): Users and permissions - can be managed from outside ArangoDB with an LDAP server in different - authentication configurations. 
- -- [**Encryption at Rest**](Security/Encryption/README.md): Let the server - persist your sensitive data strongly encrypted to protect it even if the - physical storage medium gets stolen. - -- [**Satellite Collections**](Satellites.md): Faster join operations when - working with sharded datasets by synchronously replicating selected - collections to all database servers in a cluster, so that joins can be - executed locally. - -Also see [What's New in 3.2](ReleaseNotes/NewFeatures32.md). - -Version 3.1 ------------ - -**All Editions** - -- [**Vertex-centric indices**](Indexing/VertexCentric.md): - AQL traversal queries can utilize secondary edge collection - indexes for better performance against graphs with supernodes. - -- [**VelocyPack over HTTP**](https://www.arangodb.com/2016/10/updated-java-drivers-with-arangodb-3-1/): - In addition to JSON, the binary storage format VelocyPack can now also be - used in transport over the HTTP protocol, as well as streamed using the new - bi-directional asynchronous binary protocol **VelocyStream**. - -**Enterprise Edition** - -- [**SmartGraphs**](Graphs/SmartGraphs/README.md): Scale with graphs to a - cluster and stay performant. With SmartGraphs you can use the "smartness" - of your application layer to shard your graph efficiently to your machines - and let traversals run locally. - -- **Encryption Control**: Choose your level of [SSL encryption](Programs/Arangod/Ssl.md) - -- [**Auditing**](Security/Auditing/README.md): Keep a detailed log - of all the important things that happened in ArangoDB. - -Also see [What's New in 3.1](ReleaseNotes/NewFeatures31.md). - -Version 3.0 ------------ - -- [**self-organizing cluster**](Architecture/DeploymentModes/Cluster/Architecture.md) with - synchronous replication, master/master setup, shared nothing - architecture, cluster management agency. - -- Deeply integrated, native [**AQL graph traversal**](../AQL/Graphs/index.html) - -- [**VelocyPack**](https://github.com/arangodb/velocypack) as new internal - binary storage format as well as for intermediate AQL values. - -- [**Persistent indexes**](Indexing/Persistent.md) via RocksDB suitable - for sorting and range queries. - -- [**Foxx 3.0**](Foxx/README.md): overhauled JS framework for data-centric - microservices - -- Significantly improved [**Web Interface**](Programs/WebInterface/README.md) - -Also see [What's New in 3.0](ReleaseNotes/NewFeatures30.md). diff --git a/Documentation/Books/Manual/Indexing/Fulltext.md b/Documentation/Books/Manual/Indexing/Fulltext.md deleted file mode 100644 index fbe063b92d73..000000000000 --- a/Documentation/Books/Manual/Indexing/Fulltext.md +++ /dev/null @@ -1,100 +0,0 @@ -Fulltext indexes -================ - -This is an introduction to ArangoDB's fulltext indexes. - -Introduction to Fulltext Indexes --------------------------------- - -A fulltext index can be used to find words, or prefixes of words inside documents. - -A fulltext index can be defined on one attribute only, and will include all words contained in -documents that have a textual value in the index attribute. Since ArangoDB 2.6 the index -will also include words from the index attribute if the index attribute is an array of -strings, or an object with string value members. - -For example, given a fulltext index on the `translations` attribute and the following -documents, then searching for `лиса` using the fulltext index would return only the -first document. 
Searching the index for the exact string `Fox` would return the first
two documents, and searching for `prefix:Fox` would return all three documents:

```js
{ translations: { en: "fox", de: "Fuchs", fr: "renard", ru: "лиса" } }
{ translations: "Fox is the English translation of the German word Fuchs" }
{ translations: [ "ArangoDB", "document", "database", "Foxx" ] }
```

Note that deeper nested objects are ignored. For example, a fulltext index on
*translations* would index *Fuchs*, but not *fox*, given the following document
structure:

```js
{ translations: { en: { US: "fox" }, de: "Fuchs" } }
```

If you need to search across multiple fields and/or nested objects, you may write
all the strings into a special attribute, which you then create the index on
(it might be necessary to clean the strings first, e.g. remove line breaks and
strip certain words).

If the index attribute is neither a string, an object, nor an array, its contents will
not be indexed. When indexing the contents of an array attribute, an array member will
only be included in the index if it is a string. When indexing the contents of an object
attribute, an object member value will only be included in the index if it is a string.
Other data types are ignored and not indexed.


Accessing Fulltext Indexes from the Shell
-----------------------------------------

Ensures that a fulltext index exists:

`collection.ensureIndex({ type: "fulltext", fields: [ "field" ], minLength: minLength })`

Creates a fulltext index on all documents on attribute *field*.

Fulltext indexes are implicitly sparse: all documents which do not have
the specified *field* attribute or that have a non-qualifying value in their
*field* attribute will be ignored for indexing.

Only a single attribute can be indexed. Specifying multiple attributes is
unsupported.

The minimum length of words that are indexed can be specified via the
*minLength* parameter. Words shorter than *minLength* characters will
not be indexed. *minLength* has a default value of 2, but this value might
be changed in future versions of ArangoDB. It is thus recommended to explicitly
specify this value.

In case that the index was successfully created, an object with the index
details is returned.

    @startDocuBlockInline ensureFulltextIndex
    @EXAMPLE_ARANGOSH_OUTPUT{ensureFulltextIndex}
    ~db._create("example");
    db.example.ensureIndex({ type: "fulltext", fields: [ "text" ], minLength: 3 });
    db.example.save({ text : "the quick brown", b : { c : 1 } });
    db.example.save({ text : "quick brown fox", b : { c : 2 } });
    db.example.save({ text : "brown fox jumps", b : { c : 3 } });
    db.example.save({ text : "fox jumps over", b : { c : 4 } });
    db.example.save({ text : "jumps over the", b : { c : 5 } });
    db.example.save({ text : "over the lazy", b : { c : 6 } });
    db.example.save({ text : "the lazy dog", b : { c : 7 } });
    db._query("FOR document IN FULLTEXT(example, 'text', 'the') RETURN document");
    ~db._drop("example");
    @END_EXAMPLE_ARANGOSH_OUTPUT
    @endDocuBlock ensureFulltextIndex

Looks up a fulltext index:

`collection.lookupFulltextIndex(attribute, minLength)`

Checks whether a fulltext index on the given attribute *attribute* exists.

Fulltext AQL Functions
----------------------

Fulltext AQL functions are detailed in [Fulltext functions](../../AQL/Functions/Fulltext.html).
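
The `prefix:` query syntax mentioned at the top of this page can also be combined with the
`FULLTEXT()` AQL function shown in the example block. A small sketch, assuming the *example*
collection and its fulltext index on *text* from the example above still exist:

```js
// prefix search using the fulltext index created in the example above;
// "prefix:jum" matches all indexed words starting with "jum", e.g. "jumps"
db._query("FOR doc IN FULLTEXT(example, 'text', 'prefix:jum') RETURN doc.text").toArray();
```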
diff --git a/Documentation/Books/Manual/Indexing/Geo.md b/Documentation/Books/Manual/Indexing/Geo.md deleted file mode 100644 index 0ec4bfbee7d1..000000000000 --- a/Documentation/Books/Manual/Indexing/Geo.md +++ /dev/null @@ -1,511 +0,0 @@ -Geo-Spatial Indexes -=================== - -ArangoDB features a [Google S2](http://s2geometry.io/) based geospatial index -since version 3.4.0, which supersedes the previous geo index implementation. -Indexing is supported for a subset of the [**GeoJSON**](#geojson) geometry types -as well as simple latitude longitude pairs. - -AQL's geospatial functions and GeoJSON constructors are described in -[Geo functions](../../AQL/Functions/Geo.html). - -Using a Geo-Spatial Index -------------------------- - -The geospatial index supports containment and intersection -queries for various geometric 2D shapes. You should be mainly using AQL queries -to perform these types of operations. The index can operate in **two different -modes**, depending on if you want to use the GeoJSON data-format or not. The modes -are mainly toggled by using the `geoJson` field when creating the index. - -This index assumes coordinates with the latitude between -90 and 90 degrees and the -longitude between -180 and 180 degrees. A geo index will ignore all -documents which do not fulfill these requirements. - -### GeoJSON Mode - -To create an index in GeoJSON mode execute: - -``` -collection.ensureIndex({ type: "geo", fields: [ "geometry" ], geoJson:true }) -``` - -This creates the index on all documents and uses _geometry_ as the attributed -field where the value is either a [Geometry -Object](https://tools.ietf.org/html/rfc7946#section-3.1) **or** a _coordinate -array_. The array must contain at least two numeric values with longitude (first -value) and the latitude (second value). This corresponds to the format -described in [RFC 7946 -Position](https://tools.ietf.org/html/rfc7946#section-3.1.1) - -All documents, which do not have the attribute path or have a non-conform -value in it, are excluded from the index. - -A geo index is implicitly sparse, and there is no way to control its sparsity. -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - -### Non-GeoJSON mode - -This index mode exclusively supports indexing on coordinate arrays. Values that -contain GeoJSON or other types of data will be ignored. In the non-GeoJSON mode -the index can be created on one or two fields. - -The following examples will work in the _arangosh_ command shell. - -To create a geo-spatial index on all documents using *latitude* and -*longitude* as separate attribute paths, two paths need to be specified -in the *fields* array: - -`collection.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] })` - -The first field is always defined to be the _latitude_ and the second is the -_longitude_. The `geoJson` flag is implicitly _false_ in this mode. - -Alternatively you can specify only one field: - -`collection.ensureIndex({ type: "geo", fields: [ "location" ], geoJson:false })` - -It creates a geospatial index on all documents using *location* as the path to the -coordinates. The value of the attribute has to be an array with at least two -numeric values. The array must contain the latitude (first value) and the -longitude (second value). - -All documents, which do not have the attribute path(s) or have a non-conforming -value in it, are excluded from the index. 
A geo index is implicitly sparse, and there is no way to control its sparsity.
In case that the index was successfully created, an object with the index
details, including the index-identifier, is returned.

Indexed GeoSpatial Queries
--------------------------

The geospatial index supports a variety of AQL queries, which can be built with the help
of the [geo utility functions](../../AQL/Functions/Geo.html). There are three specific
geo functions that can be optimized, provided that they are used correctly:
`GEO_DISTANCE`, `GEO_CONTAINS` and `GEO_INTERSECTS`. Additionally, there is built-in support to optimize
the older geo functions `DISTANCE`, `NEAR` and `WITHIN` (the last two only if they are
used in their four-argument version, without *distanceName*).

When in doubt whether your query is being properly optimized,
check the [AQL explain](../../AQL/ExecutionAndPerformance/ExplainingQueries.html)
output for index usage.

### Query for Results near Origin (NEAR type query)

A basic example of a query for results near an origin point:

```
FOR x IN geo_collection
  FILTER GEO_DISTANCE([@lng, @lat], x.geometry) <= 100000
  RETURN x._key
```

The first parameter can be a GeoJSON object or a coordinate array in `[longitude, latitude]` ordering.
The second parameter is the document field on which the index was created. The function
`GEO_DISTANCE` always returns the distance in meters, so this query will return results
within a radius of _100km_.

### Query for Sorted Results near Origin (NEAR type query)

A basic example of a query for the 1000 nearest results to an origin point (ascending sorting):

```
FOR x IN geo_collection
  SORT GEO_DISTANCE([@lng, @lat], x.geometry) ASC
  LIMIT 1000
  RETURN x._key
```

The first parameter can be a GeoJSON object or a coordinate array in `[longitude, latitude]` ordering.
The second parameter is the document field on which the index was created.

You may also get the results farthest away (distance sorted in descending order):

```
FOR x IN geo_collection
  SORT GEO_DISTANCE([@lng, @lat], x.geometry) DESC
  LIMIT 1000
  RETURN x._key
```

### Query for Results within Distance

A query which returns documents at a distance of _1km_ or farther away,
up to _100km_ from the origin. This will return the documents with a GeoJSON
value that is located in the specified search annulus.

```
FOR x IN geo_collection
  FILTER GEO_DISTANCE([@lng, @lat], x.geometry) <= 100000
  FILTER GEO_DISTANCE([@lng, @lat], x.geometry) >= 1000
  RETURN x
```

### Query for Results contained in Polygon

A query which returns documents whose stored geometry is contained within a
GeoJSON Polygon.

```
LET polygon = GEO_POLYGON([[[60,35],[50,5],[75,10],[70,35]]])
FOR x IN geo_collection
  FILTER GEO_CONTAINS(polygon, x.geometry)
  RETURN x
```

The first parameter of `GEO_CONTAINS` must be a polygon. Other types are not valid.
The second parameter must contain the document field on which the index was created.

### Query for Results Intersecting a Polygon

A query which returns documents with an intersection of their stored geometry and a
GeoJSON Polygon.

```
LET polygon = GEO_POLYGON([[[60,35],[50,5],[75,10],[70,35]]])
FOR x IN geo_collection
  FILTER GEO_INTERSECTS(polygon, x.geometry)
  RETURN x
```

The first parameter of `GEO_INTERSECTS` must be a polygon. Other types are not valid.
-The second parameter must contain the document field on which the index was created. - - -GeoJSON -------- - -GeoJSON is a geospatial data format based on JSON. It defines several different -types of JSON objects and the way in which they can be combined to represent -data about geographic shapes on the earth surface. GeoJSON uses a geographic -coordinate reference system, World Geodetic System 1984 (WGS 84), and units of decimal -degrees. - -Internally ArangoDB maps all coordinates onto a unit sphere. Distances are -projected onto a sphere with the Earth's *Volumetric mean radius* of *6371 -km*. ArangoDB implements a useful subset of the GeoJSON format [(RFC -7946)](https://tools.ietf.org/html/rfc7946). We do not support Feature Objects -or the GeometryCollection type. Supported geometry object types are: - -- Point -- MultiPoint -- LineString -- MultiLineString -- Polygon - -### Point - -A [GeoJSON Point](https://tools.ietf.org/html/rfc7946#section-3.1.2) is a -[position](https://tools.ietf.org/html/rfc7946#section-3.1.1) comprised of -a longitude and a latitude: - -```json -{ - "type": "Point", - "coordinates": [100.0, 0.0] -} -``` - -### MultiPoint - -A [GeoJSON MultiPoint](https://tools.ietf.org/html/rfc7946#section-3.1.7) is -an array of positions: - -```json -{ - "type": "MultiPoint", - "coordinates": [ - [100.0, 0.0], - [101.0, 1.0] - ] -} -``` - -### LineString - -A [GeoJSON LineString](https://tools.ietf.org/html/rfc7946#section-3.1.4) is -an array of two or more positions: - -```json -{ - "type": "LineString", - "coordinates": [ - [100.0, 0.0], - [101.0, 1.0] - ] -} -``` - -### MultiLineString - -A [GeoJSON MultiLineString](https://tools.ietf.org/html/rfc7946#section-3.1.5) is -an array of LineString coordinate arrays: - -```json -{ - "type": "MultiLineString", - "coordinates": [ - [ - [100.0, 0.0], - [101.0, 1.0] - ], - [ - [102.0, 2.0], - [103.0, 3.0] - ] - ] -} -``` - -### Polygon - -A [GeoJSON Polygon](https://tools.ietf.org/html/rfc7946#section-3.1.6) consists -of a series of closed `LineString` objects (ring-like). These *Linear Ring* objects -consist of four or more vertices with the first and last coordinate pairs -being equal. Coordinates of a Polygon are an array of linear ring coordinate -arrays. The first element in the array represents the exterior ring. -Any subsequent elements represent interior rings (holes within the surface). - -- A linear ring may not be empty, it needs at least three _distinct_ coordinates -- Within the same linear ring consecutive coordinates may be the same, otherwise - (except the first and last one) all coordinates need to be distinct - -No Holes: - -```json -{ - "type": "Polygon", - "coordinates": [ - [ - [100.0, 0.0], - [101.0, 0.0], - [101.0, 1.0], - [100.0, 1.0], - [100.0, 0.0] - ] - ] -} -``` - -With Holes: - -- The exterior ring should not self-intersect. -- The interior rings must be contained in the outer ring -- No two rings can cross each other, i.e. no ring may intersect both the interior and exterior face of another ring -- Rings cannot share edges, they may however share vertices -- No ring may be empty -- Polygon rings should follow the right-hand rule for orientation - (counterclockwise external rings, clockwise internal rings). 
- -```json -{ - "type": "Polygon", - "coordinates": [ - [ - [100.0, 0.0], - [101.0, 0.0], - [101.0, 1.0], - [100.0, 1.0], - [100.0, 0.0] - ], - [ - [100.8, 0.8], - [100.8, 0.2], - [100.2, 0.2], - [100.2, 0.8], - [100.8, 0.8] - ] - ] -} -``` - -### MultiPolygon - -A [GeoJSON MultiPolygon](https://tools.ietf.org/html/rfc7946#section-3.1.6) consists -of multiple polygons. The "coordinates" member is an array of -_Polygon_ coordinate arrays. - -- Polygons in the same MultiPolygon may not share edges, they may share coordinates -- Polygons and rings must not be empty -- Linear Rings **MUST** follow the right-hand rule for orientation - (counterclockwise external rings, clockwise internal rings). - -Example with two polygons, the second one with a hole: - -```json -{ - "type": "MultiPolygon", - "coordinates": [ - [ - [ - [102.0, 2.0], - [103.0, 2.0], - [103.0, 3.0], - [102.0, 3.0], - [102.0, 2.0] - ] - ], - [ - [ - [100.0, 0.0], - [101.0, 0.0], - [101.0, 1.0], - [100.0, 1.0], - [100.0, 0.0] - ], - [ - [100.2, 0.2], - [100.2, 0.8], - [100.8, 0.8], - [100.8, 0.2], - [100.2, 0.2] - ] - ] - ] -} -``` - -Arangosh Examples ------------------ - - - -ensures that a geo index exists -`collection.ensureIndex({ type: "geo", fields: [ "location" ] })` - -Creates a geospatial index on all documents using *location* as the path to the -coordinates. The value of the attribute has to be an array with at least two -numeric values. The array must contain the latitude (first value) and the -longitude (second value). - -All documents, which do not have the attribute path or have a non-conforming -value in it, are excluded from the index. - -A geo index is implicitly sparse, and there is no way to control its sparsity. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - -To create a geo index on an array attribute that contains longitude first, set -the *geoJson* attribute to `true`. This corresponds to the format described in -[RFC 7946 Position](https://tools.ietf.org/html/rfc7946#section-3.1.1) - -`collection.ensureIndex({ type: "geo", fields: [ "location" ], geoJson: true })` - -To create a geo-spatial index on all documents using *latitude* and *longitude* -as separate attribute paths, two paths need to be specified in the *fields* -array: - -`collection.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] })` - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. 
- -**Examples** - -Create a geo index for an array attribute: - - @startDocuBlockInline geoIndexCreateForArrayAttribute1 - @EXAMPLE_ARANGOSH_OUTPUT{geoIndexCreateForArrayAttribute1} - ~db._create("geo") - db.geo.ensureIndex({ type: "geo", fields: [ "loc" ] }); - | for (i = -90; i <= 90; i += 10) { - | for (j = -180; j <= 180; j += 10) { - | db.geo.save({ name : "Name/" + i + "/" + j, loc: [ i, j ] }); - | } - } - db.geo.count(); - db.geo.near(0, 0).limit(3).toArray(); - db.geo.near(0, 0).count(); - ~db._drop("geo") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock geoIndexCreateForArrayAttribute1 - -Create a geo index for a hash array attribute: - - @startDocuBlockInline geoIndexCreateForArrayAttribute2 - @EXAMPLE_ARANGOSH_OUTPUT{geoIndexCreateForArrayAttribute2} - ~db._drop("geo2") - ~db._create("geo2") - db.geo2.ensureIndex({ type: "geo", fields: [ "location.latitude", "location.longitude" ] }); - | for (i = -90; i <= 90; i += 10) { - | for (j = -180; j <= 180; j += 10) { - | db.geo2.save({ name : "Name/" + i + "/" + j, location: { latitude : i, longitude : j } }); - | } - } - db.geo2.near(0, 0).limit(3).toArray(); - ~db._drop("geo2") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock geoIndexCreateForArrayAttribute2 - -Use GeoIndex with AQL SORT statement: - - @startDocuBlockInline geoIndexSortOptimization - @EXAMPLE_ARANGOSH_OUTPUT{geoIndexSortOptimization} - ~db._create("geoSort") - db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] }); - | for (i = -90; i <= 90; i += 10) { - | for (j = -180; j <= 180; j += 10) { - | db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j }); - | } - } - var query = "FOR doc in geoSort SORT DISTANCE(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc" - db._explain(query, {}, {colors: false}); - db._query(query); - ~db._drop("geoSort") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock geoIndexSortOptimization - -Use GeoIndex with AQL FILTER statement: - - @startDocuBlockInline geoIndexFilterOptimization - @EXAMPLE_ARANGOSH_OUTPUT{geoIndexFilterOptimization} - ~db._create("geoFilter") - db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] }); - | for (i = -90; i <= 90; i += 10) { - | for (j = -180; j <= 180; j += 10) { - | db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j }); - | } - } - var query = "FOR doc in geoFilter FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc" - db._explain(query, {}, {colors: false}); - db._query(query); - ~db._drop("geoFilter") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock geoIndexFilterOptimization - - - -@startDocuBlock collectionGeo - - -@startDocuBlock collectionNear - - -@startDocuBlock collectionWithin - -ensures that a geo index exists -`collection.ensureIndex({ type: "geo", fields: [ "location" ] })` - -Since ArangoDB 2.5, this method is an alias for *ensureGeoIndex* since -geo indexes are always sparse, meaning that documents that do not contain -the index attributes or has non-numeric values in the index attributes -will not be indexed. *ensureGeoConstraint* is deprecated and *ensureGeoIndex* -should be used instead. - -The index does not provide a `unique` option because of its limited usability. -It would prevent identical coordinates from being inserted only, but even a -slightly different location (like 1 inch or 1 cm off) would be unique again and -not considered a duplicate, although it probably should. 
The desired threshold -for detecting duplicates may vary for every project (including how to calculate -the distance even) and needs to be implemented on the application layer as -needed. You can write a [Foxx service](../Foxx/index.html) for this purpose and -make use of the AQL [geo functions](../../AQL/Functions/Geo.html) to find nearby -coordinates supported by a geo index. diff --git a/Documentation/Books/Manual/Indexing/Hash.md b/Documentation/Books/Manual/Indexing/Hash.md deleted file mode 100644 index f76da804d77d..000000000000 --- a/Documentation/Books/Manual/Indexing/Hash.md +++ /dev/null @@ -1,170 +0,0 @@ -Hash Indexes -============ - -Introduction to Hash Indexes ----------------------------- - -It is possible to define a hash index on one or more attributes (or paths) of a -document. This hash index is then used in queries to locate documents in O(1) -operations. If the hash index is unique, then no two documents are allowed to have the -same set of attribute values. - -Creating a new document or updating a document will fail if the uniqueness is violated. -If the index is declared sparse, a document will be excluded from the index and no -uniqueness checks will be performed if any index attribute value is not set or has a value -of `null`. - -Accessing Hash Indexes from the Shell -------------------------------------- - -### Unique Hash Indexes - - - -Ensures that a unique constraint exists: -`collection.ensureIndex({ type: "hash", fields: [ "field1", ..., "fieldn" ], unique: true })` - -Creates a unique hash index on all documents using *field1*, ... *fieldn* -as attribute paths. At least one attribute path has to be given. -The index will be non-sparse by default. - -All documents in the collection must differ in terms of the indexed -attributes. Creating a new document or updating an existing document will -will fail if the attribute uniqueness is violated. - -To create a sparse unique index, set the *sparse* attribute to `true`: - -`collection.ensureIndex({ type: "hash", fields: [ "field1", ..., "fieldn" ], unique: true, sparse: true })` - -In case that the index was successfully created, the index identifier is returned. - -Non-existing attributes will default to `null`. -In a sparse index all documents will be excluded from the index for which all -specified index attributes are `null`. Such documents will not be taken into account -for uniqueness checks. - -In a non-sparse index, **all** documents regardless of `null` - attributes will be -indexed and will be taken into account for uniqueness checks. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureUniqueConstraint - @EXAMPLE_ARANGOSH_OUTPUT{ensureUniqueConstraint} - ~db._create("test"); - db.test.ensureIndex({ type: "hash", fields: [ "a", "b.c" ], unique: true }); - db.test.save({ a : 1, b : { c : 1 } }); - db.test.save({ a : 1, b : { c : 1 } }); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - db.test.save({ a : 1, b : { c : null } }); - db.test.save({ a : 1 }); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureUniqueConstraint - -### Non-unique Hash Indexes - - - -Ensures that a non-unique hash index exists: -`collection.ensureIndex({ type: "hash", fields: [ "field1", ..., "fieldn" ] })` - -Creates a non-unique hash index on all documents using *field1*, ... *fieldn* -as attribute paths. 
At least one attribute path has to be given. -The index will be non-sparse by default. - -To create a sparse unique index, set the *sparse* attribute to `true`: - -`collection.ensureIndex({ type: "hash", fields: [ "field1", ..., "fieldn" ], sparse: true })` - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureHashIndex - @EXAMPLE_ARANGOSH_OUTPUT{ensureHashIndex} - ~db._create("test"); - db.test.ensureIndex({ type: "hash", fields: [ "a" ] }); - db.test.save({ a : 1 }); - db.test.save({ a : 1 }); - db.test.save({ a : null }); - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureHashIndex - -### Hash Array Indexes - -Ensures that a hash array index exists (non-unique): -`collection.ensureIndex({ type: "hash", fields: [ "field1[*]", ..., "fieldn[*]" ] })` - -Creates a non-unique hash array index for the individual elements of the array -attributes field1[*], ... fieldn[*] found in the documents. At least -one attribute path has to be given. The index always treats the indexed arrays as -sparse. - -It is possible to combine array indexing with standard indexing: -`collection.ensureIndex({ type: "hash", fields: [ "field1[*]", "field2" ] })` - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureHashIndexArray - @EXAMPLE_ARANGOSH_OUTPUT{ensureHashIndexArray} - ~db._create("test"); - db.test.ensureIndex({ type: "hash", fields: [ "a[*]" ] }); - db.test.save({ a : [ 1, 2 ] }); - db.test.save({ a : [ 1, 3 ] }); - db.test.save({ a : null }); - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureHashIndexArray - - -Creating Hash Index in Background ---------------------------------- - -{% hint 'info' %} -This section only applies to the *rocksdb* storage engine -{% endhint %} - -Creating new indexes is by default done under an exclusive collection lock. This means -that the collection (or the respective shards) are not available as long as the index -is created. This "foreground" index creation can be undesirable, if you have to perform it -on a live system without a dedicated maintenance window. - -Indexes can also be created in "background", not using an exclusive lock during the creation. -The collection remains available, other CRUD operations can run on the collection while the index is created. -This can be achieved by using the *inBackground* option. - -To create an hash index in the background in *arangosh* just specify `inBackground: true`: - -```js -db.collection.ensureIndex({ type: "hash", fields: [ "value" ], inBackground: true }); -``` - -For more information see "Creating Indexes in Background" in the [Index basics](IndexBasics.md) page. - - -Ensure uniqueness of relations in edge collections --------------------------------------------------- - -It is possible to create secondary indexes using the edge attributes `_from` -and `_to`, starting with ArangoDB 3.0. A combined index over both fields together -with the unique option enabled can be used to prevent duplicate relations from -being created. - -For example, a document collection *verts* might contain vertices with the document -handles `verts/A`, `verts/B` and `verts/C`. Relations between these documents can -be stored in an edge collection *edges* for instance. Now, you may want to make sure -that the vertex `verts/A` is never linked to `verts/B` by an edge more than once. 
-This can be achieved by adding a unique, non-sparse hash index for the fields `_from` -and `_to`: - - db.edges.ensureIndex({ type: "hash", fields: [ "_from", "_to" ], unique: true }); - -Creating an edge `{ _from: "verts/A", _to: "verts/B" }` in *edges* will be accepted, -but only once. Another attempt to store an edge with the relation **A** → **B** will -be rejected by the server with a *unique constraint violated* error. This includes -updates to the `_from` and `_to` fields. - -Note that adding a relation **B** → **A** is still possible, so is **A** → **A** -and **B** → **B**, because they are all different relations in a directed graph. -Each one can only occur once however. diff --git a/Documentation/Books/Manual/Indexing/IndexBasics.md b/Documentation/Books/Manual/Indexing/IndexBasics.md deleted file mode 100644 index 6ba098dd2b3c..000000000000 --- a/Documentation/Books/Manual/Indexing/IndexBasics.md +++ /dev/null @@ -1,673 +0,0 @@ -Index basics -============ - -Indexes allow fast access to documents, provided the indexed attribute(s) -are used in a query. While ArangoDB automatically indexes some system -attributes, users are free to create extra indexes on non-system attributes -of documents. - -User-defined indexes can be created on collection level. Most user-defined indexes -can be created by specifying the names of the index attributes. -Some index types allow indexing just one attribute (e.g. *fulltext* index) whereas -other index types allow indexing multiple attributes at the same time. - -Learn how to use different indexes efficiently by going through the -[ArangoDB Performance Course](https://www.arangodb.com/arangodb-performance-course/). - -The system attributes `_id`, `_key`, `_from` and `_to` are automatically indexed -by ArangoDB, without the user being required to create extra indexes for them. -`_id` and `_key` are covered by a collection's primary key, and `_from` and `_to` -are covered by an edge collection's edge index automatically. - -Using the system attribute `_id` in user-defined indexes is not possible, but -indexing `_key`, `_rev`, `_from`, and `_to` is. - -Creating new indexes is by default done under an exclusive collection lock. The collection is not -available while the index is being created. This "foreground" index creation can be undesirable, -if you have to perform it on a live system without a dedicated maintenance window. - -For potentially long running index creation operations the _RocksDB_ storage-engine also supports -creating indexes in "background". The collection remains (mostly) available during the index creation, -see the section [Creating Indexes in Background](#creating-indexes-in-background) for more information. - - -ArangoDB provides the following index types: - -Primary Index -------------- - -For each collection there will always be a *primary index* which is a hash index -for the [document keys](../Appendix/Glossary.md#document-key) (`_key` attribute) -of all documents in the collection. The primary index allows quick selection -of documents in the collection using either the `_key` or `_id` attributes. It will -be used from within AQL queries automatically when performing equality lookups on -`_key` or `_id`. - -There are also dedicated functions to find a document given its `_key` or `_id` -that will always make use of the primary index: - -```js -db.collection.document(""); -db._document(""); -``` - -As the primary index is an unsorted hash index, it cannot be used for non-equality -range queries or for sorting. 
- -The primary index of a collection cannot be dropped or changed, and there is no -mechanism to create user-defined primary indexes. - - -Edge Index ----------- - -Every [edge collection](../Appendix/Glossary.md#edge-collection) also has an -automatically created *edge index*. The edge index provides quick access to -documents by either their `_from` or `_to` attributes. It can therefore be -used to quickly find connections between vertex documents and is invoked when -the connecting edges of a vertex are queried. - -Edge indexes are used from within AQL when performing equality lookups on `_from` -or `_to` values in an edge collections. There are also dedicated functions to -find edges given their `_from` or `_to` values that will always make use of the -edge index: - -```js -db.collection.edges(""); -db.collection.edges(""); -db.collection.outEdges(""); -db.collection.outEdges(""); -db.collection.inEdges(""); -db.collection.inEdges(""); -``` - -Internally, the edge index is implemented as a hash index, which stores the union -of all `_from` and `_to` attributes. It can be used for equality -lookups, but not for range queries or for sorting. Edge indexes are automatically -created for edge collections. It is not possible to create user-defined edge indexes. -However, it is possible to freely use the `_from` and `_to` attributes in user-defined -indexes. - -An edge index cannot be dropped or changed. - - -Hash Index ----------- - -A hash index can be used to quickly find documents with specific attribute values. -The hash index is unsorted, so it supports equality lookups but no range queries or sorting. - -A hash index can be created on one or multiple document attributes. A hash index will -only be used by a query if all index attributes are present in the search condition, -and if all attributes are compared using the equality (`==`) operator. Hash indexes are -used from within AQL and several query functions, e.g. `byExample`, `firstExample` etc. - -Hash indexes can optionally be declared unique, then disallowing saving the same -value(s) in the indexed attribute(s). Hash indexes can optionally be sparse. - -The different types of hash indexes have the following characteristics: - -- **unique hash index**: all documents in the collection must have different values for - the attributes covered by the unique index. Trying to insert a document with the same - key value as an already existing document will lead to a unique constraint - violation. - - This type of index is not sparse. Documents that do not contain the index attributes or - that have a value of `null` in the index attribute(s) will still be indexed. - A key value of `null` may only occur once in the index, so this type of index cannot - be used for optional attributes. - - The unique option can also be used to ensure that - [no duplicate edges](Hash.md#ensure-uniqueness-of-relations-in-edge-collections) are - created, by adding a combined index for the fields `_from` and `_to` to an edge collection. - -- **unique, sparse hash index**: all documents in the collection must have different - values for the attributes covered by the unique index. Documents in which at least one - of the index attributes is not set or has a value of `null` are not included in the - index. This type of index can be used to ensure that there are no duplicate keys in - the collection for documents which have the indexed attributes set. 
As the index will - exclude documents for which the indexed attributes are `null` or not set, it can be - used for optional attributes. - -- **non-unique hash index**: all documents in the collection will be indexed. This type - of index is not sparse. Documents that do not contain the index attributes or that have - a value of `null` in the index attribute(s) will still be indexed. Duplicate key values - can occur and do not lead to unique constraint violations. - -- **non-unique, sparse hash index**: only those documents will be indexed that have all - the indexed attributes set to a value other than `null`. It can be used for optional - attributes. - -The amortized complexity of lookup, insert, update, and removal operations in unique hash -indexes is O(1). - -Non-unique hash indexes have an amortized complexity of O(1) for insert, update, and -removal operations. That means non-unique hash indexes can be used on attributes with -low cardinality. - -If a hash index is created on an attribute that is missing in all or many of the documents, -the behavior is as follows: - -- if the index is sparse, the documents missing the attribute will not be indexed and not - use index memory. These documents will not influence the update or removal performance - for the index. - -- if the index is non-sparse, the documents missing the attribute will be contained in the - index with a key value of `null`. - -Hash indexes support [indexing array values](#indexing-array-values) if the index -attribute name is extended with a [\*]. - - -Skiplist Index --------------- - -A skiplist is a sorted index structure. It can be used to quickly find documents -with specific attribute values, for range queries and for returning documents from -the index in sorted order. Skiplists will be used from within AQL and several query -functions, e.g. `byExample`, `firstExample` etc. - -Skiplist indexes will be used for lookups, range queries and sorting only if either all -index attributes are provided in a query, or if a leftmost prefix of the index attributes -is specified. - -For example, if a skiplist index is created on attributes `value1` and `value2`, the -following filter conditions can use the index (note: the `<=` and `>=` operators are -intentionally omitted here for the sake of brevity): - -```js -FILTER doc.value1 == ... -FILTER doc.value1 < ... -FILTER doc.value1 > ... -FILTER doc.value1 > ... && doc.value1 < ... - -FILTER doc.value1 == ... && doc.value2 == ... -FILTER doc.value1 == ... && doc.value2 > ... -FILTER doc.value1 == ... && doc.value2 > ... && doc.value2 < ... -``` - -In order to use a skiplist index for sorting, the index attributes must be specified in -the `SORT` clause of the query in the same order as they appear in the index definition. -Skiplist indexes are always created in ascending order, but they can be used to access -the indexed elements in both ascending or descending order. However, for a combined index -(an index on multiple attributes) this requires that the sort orders in a single query -as specified in the `SORT` clause must be either all ascending (optionally omitted -as ascending is the default) or all descending. 
- -For example, if the skiplist index is created on attributes `value1` and `value2` -(in this order), then the following sorts clauses can use the index for sorting: - -- `SORT value1 ASC, value2 ASC` (and its equivalent `SORT value1, value2`) -- `SORT value1 DESC, value2 DESC` -- `SORT value1 ASC` (and its equivalent `SORT value1`) -- `SORT value1 DESC` - -The following sort clauses cannot make use of the index order, and require an extra -sort step: - -- `SORT value1 ASC, value2 DESC` -- `SORT value1 DESC, value2 ASC` -- `SORT value2` (and its equivalent `SORT value2 ASC`) -- `SORT value2 DESC` (because first indexed attribute `value1` is not used in sort clause) - -Note: the latter two sort clauses cannot use the index because the sort clause does not -refer to a leftmost prefix of the index attributes. - -Skiplists can optionally be declared unique, disallowing saving the same value in the indexed -attribute. They can be sparse or non-sparse. - -The different types of skiplist indexes have the following characteristics: - -- **unique skiplist index**: all documents in the collection must have different values for - the attributes covered by the unique index. Trying to insert a document with the same - key value as an already existing document will lead to a unique constraint - violation. - - This type of index is not sparse. Documents that do not contain the index attributes or - that have a value of `null` in the index attribute(s) will still be indexed. - A key value of `null` may only occur once in the index, so this type of index cannot - be used for optional attributes. - -- **unique, sparse skiplist index**: all documents in the collection must have different - values for the attributes covered by the unique index. Documents in which at least one - of the index attributes is not set or has a value of `null` are not included in the - index. This type of index can be used to ensure that there are no duplicate keys in - the collection for documents which have the indexed attributes set. As the index will - exclude documents for which the indexed attributes are `null` or not set, it can be - used for optional attributes. - -- **non-unique skiplist index**: all documents in the collection will be indexed. This type - of index is not sparse. Documents that do not contain the index attributes or that have - a value of `null` in the index attribute(s) will still be indexed. Duplicate key values - can occur and do not lead to unique constraint violations. - -- **non-unique, sparse skiplist index**: only those documents will be indexed that have all - the indexed attributes set to a value other than `null`. It can be used for optional - attributes. - -The operational amortized complexity for skiplist indexes is logarithmically correlated -with the number of documents in the index. - -Skiplist indexes support [indexing array values](#indexing-array-values) if the index -attribute name is extended with a [\*]`. - -TTL (time-to-live) Index ------------------------- - -The TTL index provided by ArangoDB can be used for automatically removing expired documents -from a collection. - -A TTL index is set up by setting an `expireAfter` value and by picking a single -document attribute which contains the documents' creation date and time. Documents -are expired after `expireAfter` seconds after their creation time. The creation time -is specified as either a numeric timestamp (Unix timestamp) or a date string in format -`YYYY-MM-DDTHH:MM:SS` with optional milliseconds. 
All date strings will be interpreted -as UTC dates. - -For example, if `expireAfter` is set to 600 seconds (10 minutes) and the index -attribute is "creationDate" and there is the following document: - - { "creationDate" : 1550165973 } - -This document will be indexed with a creation date time value of `1550165973`, -which translates to the human-readable date `2019-02-14T17:39:33.000Z`. The document -will expire 600 seconds afterwards, which is at timestamp `1550166573` (or -`2019-02-14T17:49:33.000Z` in the human-readable version). - -The actual removal of expired documents will not necessarily happen immediately. -Expired documents will eventually removed by a background thread that is periodically -going through all TTL indexes and removing the expired documents. The frequency for -invoking this background thread can be configured using the `--ttl.frequency` -startup option. - -There is no guarantee when exactly the removal of expired documents will be carried -out, so queries may still find and return documents that have already expired. These -will eventually be removed when the background thread kicks in and has capacity to -remove the expired documents. It is guaranteed however that only documents which are -past their expiration time will actually be removed. - -Please note that the numeric date time values for the index attribute should be -specified in milliseconds since January 1st 1970 (Unix timestamp). To calculate the current -timestamp from JavaScript in this format, there is `Date.now() / 1000`, to calculate it -from an arbitrary Date instance, there is `Date.getTime() / 1000`. - -Alternatively, the index attribute values can be specified as a date string in format -`YYYY-MM-DDTHH:MM:SS` with optional milliseconds. All date strings will be interpreted -as UTC dates. - -The above example document using a datestring attribute value would be - - { "creationDate" : "2019-02-14T17:39:33.000Z" } - -In case the index attribute does not contain a numeric value nor a proper date string, -the document will not be stored in the TTL index and thus will not become a candidate -for expiration and removal. Providing either a non-numeric value or even no value for -the index attribute is a supported way of keeping documents from being expired and removed. - - -Geo Index ---------- - -Users can create additional geo indexes on one or multiple attributes in collections. -A geo index is used to find places on the surface of the earth fast. - -The geo index stores two-dimensional coordinates. It can be created on either two -separate document attributes (latitude and longitude) or a single array attribute that -contains both latitude and longitude. Latitude and longitude must be numeric values. - -The geo index provides operations to find documents with coordinates nearest to a given -comparison coordinate, and to find documents with coordinates that are within a specifiable -radius around a comparison coordinate. - -The geo index is used via dedicated functions in AQL, the simple queries -functions and it is implicitly applied when in AQL a SORT or FILTER is used with -the distance function. Otherwise it will not be used for other types of queries -or conditions. - - -Fulltext Index --------------- - -A fulltext index can be used to find words, or prefixes of words inside documents. -A fulltext index can be created on a single attribute only, and will index all words -contained in documents that have a textual value in that attribute. 
Only words with a (specifiable) -minimum length are indexed. Word tokenization is done using the word boundary analysis -provided by libicu, which is taking into account the selected language provided at -server start. Words are indexed in their lower-cased form. The index supports complete -match queries (full words) and prefix queries, plus basic logical operations such as -`and`, `or` and `not` for combining partial results. - -The fulltext index is sparse, meaning it will only index documents for which the index -attribute is set and contains a string value. Additionally, only words with a configurable -minimum length will be included in the index. - -The fulltext index is used via dedicated functions in AQL or the simple queries, but will -not be enabled for other types of queries or conditions. - - -Persistent Index ----------------- - -{% hint 'warning' %} -this index should not be used anymore, instead use the rocksdb storage engine -with either the *skiplist* or *hash* index. -{% endhint %} - -The persistent index is a sorted index with persistence. The index entries are written to -disk when documents are stored or updated. That means the index entries do not need to be -rebuilt from the collection data when the server is restarted or the indexed collection -is initially loaded. Thus using persistent indexes may reduce collection loading times. - -The persistent index type can be used for secondary indexes at the moment. That means the -persistent index currently cannot be made the only index for a collection, because there -will always be the in-memory primary index for the collection in addition, and potentially -more indexes (such as the edges index for an edge collection). - -The index implementation is using the RocksDB engine, and it provides logarithmic complexity -for insert, update, and remove operations. As the persistent index is not an in-memory -index, it does not store pointers into the primary index as all the in-memory indexes do, -but instead it stores a document's primary key. To retrieve a document via a persistent -index via an index value lookup, there will therefore be an additional O(1) lookup into -the primary index to fetch the actual document. - -As the persistent index is sorted, it can be used for point lookups, range queries and sorting -operations, but only if either all index attributes are provided in a query, or if a leftmost -prefix of the index attributes is specified. - - -Indexing attributes and sub-attributes --------------------------------------- - -Top-level as well as nested attributes can be indexed. For attributes at the top level, -the attribute names alone are required. To index a single field, pass an array with a -single element (string of the attribute key) to the *fields* parameter of the -[ensureIndex() method](WorkingWithIndexes.md#creating-an-index). 
To create a -combined index over multiple fields, simply add more members to the *fields* array: - -```js -// { name: "Smith", age: 35 } -db.posts.ensureIndex({ type: "hash", fields: [ "name" ] }) -db.posts.ensureIndex({ type: "hash", fields: [ "name", "age" ] }) -``` - -To index sub-attributes, specify the attribute path using the dot notation: - -```js -// { name: {last: "Smith", first: "John" } } -db.posts.ensureIndex({ type: "hash", fields: [ "name.last" ] }) -db.posts.ensureIndex({ type: "hash", fields: [ "name.last", "name.first" ] }) -``` - -Indexing array values ---------------------- - -If an index attribute contains an array, ArangoDB will store the entire array as the index value -by default. Accessing individual members of the array via the index is not possible this -way. - -To make an index insert the individual array members into the index instead of the entire array -value, a special array index needs to be created for the attribute. Array indexes can be set up -like regular hash or skiplist indexes using the `collection.ensureIndex()` function. To make a -hash or skiplist index an array index, the index attribute name needs to be extended with [\*] -when creating the index and when filtering in an AQL query using the `IN` operator. - -The following example creates an array hash index on the `tags` attribute in a collection named -`posts`: - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*]" ] }); -db.posts.insert({ tags: [ "foobar", "baz", "quux" ] }); -``` - -This array index can then be used for looking up individual `tags` values from AQL queries via -the `IN` operator: - -```js -FOR doc IN posts - FILTER 'foobar' IN doc.tags - RETURN doc -``` - -It is possible to add the [array expansion operator](../../AQL/Advanced/ArrayOperators.html#array-expansion) -[\*], but it is not mandatory. You may use it to indicate that an array index is used, -it is purely cosmetic however: - -```js -FOR doc IN posts - FILTER 'foobar' IN doc.tags[*] - RETURN doc -``` - -The following FILTER conditions will **not use** the array index: - -```js -FILTER doc.tags ANY == 'foobar' -FILTER doc.tags ANY IN 'foobar' -FILTER doc.tags IN 'foobar' -FILTER doc.tags == 'foobar' -FILTER 'foobar' == doc.tags -``` - -It is also possible to create an index on subattributes of array values. This makes sense -if the index attribute is an array of objects, e.g. - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*].name" ] }); -db.posts.insert({ tags: [ { name: "foobar" }, { name: "baz" }, { name: "quux" } ] }); -``` - -The following query will then use the array index (this does require the -[array expansion operator](../../AQL/Advanced/ArrayOperators.html#array-expansion)): - -```js -FOR doc IN posts - FILTER 'foobar' IN doc.tags[*].name - RETURN doc -``` - -If you store a document having the array which does contain elements not having -the subattributes this document will also be indexed with the value `null`, which -in ArangoDB is equal to attribute not existing. - -ArangoDB supports creating array indexes with a single [\*] operator per index -attribute. For example, creating an index as follows is **not supported**: - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*].name[*].value" ] }); -``` - -Array values will automatically be de-duplicated before being inserted into an array index. 
-For example, if the following document is inserted into the collection, the duplicate array -value `bar` will be inserted only once: - -```js -db.posts.insert({ tags: [ "foobar", "bar", "bar" ] }); -``` - -This is done to avoid redundant storage of the same index value for the same document, which -would not provide any benefit. - -If an array index is declared **unique**, the de-duplication of array values will happen before -inserting the values into the index, so the above insert operation with two identical values -`bar` will not necessarily fail - -It will always fail if the index already contains an instance of the `bar` value. However, if -the value `bar` is not already present in the index, then the de-duplication of the array values will -effectively lead to `bar` being inserted only once. - -To turn off the deduplication of array values, it is possible to set the **deduplicate** attribute -on the array index to `false`. The default value for **deduplicate** is `true` however, so -de-duplication will take place if not explicitly turned off. - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*]" ], deduplicate: false }); - -// will fail now -db.posts.insert({ tags: [ "foobar", "bar", "bar" ] }); -``` - -If an array index is declared and you store documents that do not have an array at the specified attribute -this document will not be inserted in the index. Hence the following objects will not be indexed: - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*]" ] }); -db.posts.insert({ something: "else" }); -db.posts.insert({ tags: null }); -db.posts.insert({ tags: "this is no array" }); -db.posts.insert({ tags: { content: [1, 2, 3] } }); -``` - -An array index is able to index explicit `null` values. When queried for `null`values, it -will only return those documents having explicitly `null` stored in the array, it will not -return any documents that do not have the array at all. - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*]" ] }); -db.posts.insert({tags: null}) // Will not be indexed -db.posts.insert({tags: []}) // Will not be indexed -db.posts.insert({tags: [null]}); // Will be indexed for null -db.posts.insert({tags: [null, 1, 2]}); // Will be indexed for null, 1 and 2 -``` - -Declaring an array index as **sparse** does not have an effect on the array part of the index, -this in particular means that explicit `null` values are also indexed in the **sparse** version. -If an index is combined from an array and a normal attribute the sparsity will apply for the attribute e.g.: - -```js -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*]", "name" ], sparse: true }); -db.posts.insert({tags: null, name: "alice"}) // Will not be indexed -db.posts.insert({tags: [], name: "alice"}) // Will not be indexed -db.posts.insert({tags: [1, 2, 3]}) // Will not be indexed -db.posts.insert({tags: [1, 2, 3], name: null}) // Will not be indexed -db.posts.insert({tags: [1, 2, 3], name: "alice"}) -// Will be indexed for [1, "alice"], [2, "alice"], [3, "alice"] -db.posts.insert({tags: [null], name: "bob"}) -// Will be indexed for [null, "bob"] -``` - -Please note that filtering using array indexes only works from within AQL queries and -only if the query filters on the indexed attribute using the `IN` operator. The other -comparison operators (`==`, `!=`, `>`, `>=`, `<`, `<=`, `ANY`, `ALL`, `NONE`) currently -cannot use array indexes. 
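To illustrate the `IN` restriction, here is a hedged sketch that reuses the hypothetical `posts` collection and the combined sparse array index on `tags[*]` and `name` from the example above. Whether the array index is actually picked still depends on the execution plan the optimizer estimates to be cheapest:

```js
// illustration only: assumes the "posts" collection and the combined sparse
// array index on [ "tags[*]", "name" ] created in the example above
db._query(`
  FOR doc IN posts
    FILTER 1 IN doc.tags[*] AND doc.name == "alice"
    RETURN doc
`).toArray();
```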
- -Vertex centric indexes ----------------------- - -As mentioned above, the most important indexes for graphs are the edge -indexes, indexing the `_from` and `_to` attributes of edge collections. -They provide very quick access to all edges originating in or arriving -at a given vertex, which allows to quickly find all neighbors of a vertex -in a graph. - -In many cases one would like to run more specific queries, for example -finding amongst the edges originating from a given vertex only those -with a timestamp greater than or equal to some date and time. Exactly this -is achieved with "vertex centric indexes". In a sense these are localized -indexes for an edge collection, which sit at every single vertex. - -Technically, they are implemented in ArangoDB as indexes, which sort the -complete edge collection first by `_from` and then by other attributes -for _OUTBOUND_ traversals, or first by `_to` and then by other attributes -for _INBOUND_ traversals. For traversals in _ANY_ direction two indexes -are needed, one with `_from` and the other with `_to` as first indexed field. - -If we for example have a skiplist index on the attributes `_from` and -`timestamp` of an edge collection, we can answer the above question -very quickly with a single range lookup in the index. - -Since ArangoDB 3.0 one can create sorted indexes (type "skiplist" and -"persistent") that index the special edge attributes `_from` or `_to` -and additionally other attributes. Since ArangoDB 3.1, these are used -in graph traversals, when appropriate `FILTER` statements are found -by the optimizer. - -For example, to create a vertex centric index of the above type, you -would simply do - -```js -db.edges.ensureIndex({"type":"skiplist", "fields": ["_from", "timestamp"]}); -``` - -in arangosh. Then, queries like - -```js -FOR v, e, p IN 1..1 OUTBOUND "V/1" edges - FILTER e.timestamp >= "2018-07-09" - RETURN p -``` - -will be considerably faster in case there are many edges originating -from vertex `"V/1"` but only few with a recent time stamp. Note that the -optimizer may prefer the default edge index over vertex centric indexes -based on the costs it estimates, even if a vertex centric index might -in fact be faster. Vertex centric indexes are more likely to be chosen -for highly connected graphs and with RocksDB storage engine. - - -Creating Indexes in Background ------------------------------- - -Introduced in: v3.5.0 - -{% hint 'info' %} -Background indexing is available for the *RocksDB* storage engine only. -{% endhint %} - -Creating new indexes is by default done under an exclusive collection lock. This means -that the collection (or the respective shards) are not available for write operations -as long as the index is created. This "foreground" index creation can be undesirable, -if you have to perform it on a live system without a dedicated maintenance window. - -Indexes can also be created in "background", not using an -exclusive lock during the entire index creation. The collection remains basically available, -so that other CRUD operations can run on the collection while the index is being created. -This can be achieved by setting the *inBackground* attribute when creating an index. 
- -To create an index in the background in *arangosh* just specify `inBackground: true`, -like in the following examples: - -```js -// create the hash index in the background -db.collection.ensureIndex({ type: "hash", fields: [ "value" ], unique: false, inBackground: true }); -db.collection.ensureIndex({ type: "hash", fields: [ "email" ], unique: true, inBackground: true }); - -// skiplist indexes work also of course -db.collection.ensureIndex({ type :"skiplist", fields: ["abc", "cdef"], unique: true, inBackground: true }); -db.collection.ensureIndex({ type :"skiplist", fields: ["abc", "cdef"], sparse: true, inBackground: true }); - -// also supported on fulltext indexes -db.collection.ensureIndex({ type: "geo", fields: [ "latitude", "longitude"], inBackground: true }); -db.collection.ensureIndex({ type: "geo", fields: [ "latitude", "longitude"], inBackground: true }); -db.collection.ensureIndex({ type: "fulltext", fields: [ "text" ], minLength: 4, inBackground: true }) -``` - -### Behavior - -Indexes that are still in the build process will not be visible via the ArangoDB APIs. -Nevertheless it is not possible to create the same index twice via the *ensureIndex* API -while an index is still begin created. AQL queries also will not use these indexes until -the index reports back as fully created. Note that the initial *ensureIndex* call or HTTP -request will still block until the index is completely ready. Existing single-threaded -client programs can thus safely set the *inBackground* option to *true* and continue to -work as before. - -{% hint 'info' %} -Should you be building an index in the background you cannot rename or drop the collection. -These operations will block until the index creation is finished. This is equally the case -with foreground indexing. -{% endhint %} - -After an interrupted index build (i.e. due to a server crash) the partially built index -will the removed. In the ArangoDB cluster the index might then be automatically recreated -on affected shards. - -### Performance - -Background index creation might be slower than the "foreground" index creation and require -more RAM. Under a write heavy load (specifically many remove, update or replace operations), -the background index creation needs to keep a list of removed documents in RAM. This might -become unsustainable if this list grows to tens of millions of entries. - -Building an index is always a write heavy operation (internally), it is always a good idea to build indexes -during times with less load. - diff --git a/Documentation/Books/Manual/Indexing/IndexUtilization.md b/Documentation/Books/Manual/Indexing/IndexUtilization.md deleted file mode 100644 index 679790ef6406..000000000000 --- a/Documentation/Books/Manual/Indexing/IndexUtilization.md +++ /dev/null @@ -1,97 +0,0 @@ -Index Utilization -================= - -In most cases ArangoDB will use a single index per collection in a given query. AQL queries can -use more than one index per collection when multiple FILTER conditions are combined with a -logical `OR` and these can be covered by indexes. AQL queries will use a single index per -collection when FILTER conditions are combined with logical `AND`. - -Creating multiple indexes on different attributes of the same collection may give the query -optimizer more choices when picking an index. Creating multiple indexes on different attributes -can also help in speeding up different queries, with FILTER conditions on different attributes. 
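As a rough sketch of this (the `users` collection and its attributes are made up for illustration), two separate single-attribute indexes can cover the two branches of an `OR` filter, so the optimizer may use both of them in a single query:

```js
// hypothetical "users" collection with one index per filtered attribute
db.users.ensureIndex({ type: "hash", fields: [ "status" ] });
db.users.ensureIndex({ type: "skiplist", fields: [ "age" ] });

// both OR branches are covered by an index, so the optimizer may use both indexes
db._query(`
  FOR u IN users
    FILTER u.status == "active" || u.age > 65
    RETURN u
`).toArray();
```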
- -It is often beneficial to create an index on more than just one attribute. By adding more attributes -to an index, an index can become more selective and thus reduce the number of documents that -queries need to process. - -ArangoDB's primary indexes, edges indexes and hash indexes will automatically provide selectivity -estimates. Index selectivity estimates are provided in the web interface, the `getIndexes()` return -value and in the `explain()` output for a given query. - -The more selective an index is, the more documents it will filter on average. The index selectivity -estimates are therefore used by the optimizer when creating query execution plans when there are -multiple indexes the optimizer can choose from. The optimizer will then select a combination of -indexes with the lowest estimated total cost. In general, the optimizer will pick the indexes with -the highest estimated selectivity. - -Sparse indexes may or may not be picked by the optimizer in a query. As sparse indexes do not contain -`null` values, they will not be used for queries if the optimizer cannot safely determine whether a -FILTER condition includes `null` values for the index attributes. The optimizer policy is to produce -correct results, regardless of whether or which index is used to satisfy FILTER conditions. If it is -unsure about whether using an index will violate the policy, it will not make use of the index. - - -Troubleshooting ---------------- - -When in doubt about whether and which indexes will be used for executing a given AQL query, -click the *Explain* button in the web interface in the *Queries* view or use -the `explain()` method for the statement as follows (from the ArangoShell): - -```js -var query = "FOR doc IN collection FILTER doc.value > 42 RETURN doc"; -var stmt = db._createStatement(query); -stmt.explain(); -``` - -The `explain()` command will return a detailed JSON representation of the query's execution plan. -The JSON explain output is intended to be used by code. To get a human-readable and much more -compact explanation of the query, there is an explainer tool: - -```js -var query = "FOR doc IN collection FILTER doc.value > 42 RETURN doc"; -require("@arangodb/aql/explainer").explain(query); -``` - -If any of the explain methods shows that a query is not using indexes, the following steps may help: - -* check if the attribute names in the query are correctly spelled. In a schema-free database, documents - in the same collection can have varying structures. There is no such thing as a *non-existing attribute* - error. A query that refers to attribute names not present in any of the documents will not return an - error, and obviously will not benefit from indexes. - -* check the return value of the `getIndexes()` method for the collections used in the query and validate - that indexes are actually present on the attributes used in the query's filter conditions. - -* if indexes are present but not used by the query, the indexes may have the wrong type. For example, a - hash index will only be used for equality comparisons (i.e. `==`) but not for other comparison types such - as `<`, `<=`, `>`, `>=`. Additionally hash indexes will only be used if all of the index attributes are - used in the query's FILTER conditions. A skiplist index will only be used if at least its first attribute - is used in a FILTER condition. If additionally of the skiplist index attributes are specified in the query - (from left-to-right), they may also be used and allow to filter more documents. 
- -* using indexed attributes as function parameters or in arbitrary expressions will likely lead to the index - on the attribute not being used. For example, the following queries will not use an index on `value`: - - FOR doc IN collection FILTER TO_NUMBER(doc.value) == 42 RETURN doc - FOR doc IN collection FILTER doc.value - 1 == 42 RETURN doc - - In these cases the queries should be rewritten so that only the index attribute is present on one side of - the operator, or additional filters and indexes should be used to restrict the amount of documents otherwise. - -* certain AQL functions such as `WITHIN()` or `FULLTEXT()` do utilize indexes internally, but their use is - not mentioned in the query explanation for functions in general. These functions will raise query errors - (at runtime) if no suitable index is present for the collection in question. - -* the query optimizer will in general pick one index per collection in a query. It can pick more than - one index per collection if the FILTER condition contains multiple branches combined with logical `OR`. - For example, the following queries can use indexes: - - FOR doc IN collection FILTER doc.value1 == 42 || doc.value1 == 23 RETURN doc - FOR doc IN collection FILTER doc.value1 == 42 || doc.value2 == 23 RETURN doc - FOR doc IN collection FILTER doc.value1 < 42 || doc.value2 > 23 RETURN doc - - The two `OR`s in the first query will be converted to an `IN` list, and if there is a suitable index on - `value1`, it will be used. The second query requires two separate indexes on `value1` and `value2` and - will use them if present. The third query can use the indexes on `value1` and `value2` when they are - sorted. diff --git a/Documentation/Books/Manual/Indexing/Persistent.md b/Documentation/Books/Manual/Indexing/Persistent.md deleted file mode 100644 index c1347da4087d..000000000000 --- a/Documentation/Books/Manual/Indexing/Persistent.md +++ /dev/null @@ -1,177 +0,0 @@ -Persistent indexes -================== - -{% hint 'warning' %} -The persistent index type is considered as deprecated from version 3.4.0 on. -It will be removed in 4.0.0. If you use the RocksDB storage engine, you can -replace it with a skiplist index, which uses the same implementation. -{% endhint %} - -Introduction to Persistent Indexes ----------------------------------- - -This is an introduction to ArangoDB's persistent indexes. - -It is possible to define a persistent index on one or more attributes (or paths) -of documents. The index is then used in queries to locate documents within a given range. -If the index is declared unique, then no two documents are allowed to have the same -set of attribute values. - -Creating a new document or updating a document will fail if the uniqueness is violated. -If the index is declared sparse, a document will be excluded from the index and no -uniqueness checks will be performed if any index attribute value is not set or has a value -of `null`. - -Accessing Persistent Indexes from the Shell -------------------------------------------- - - -ensures that a unique persistent index exists -`collection.ensureIndex({ type: "persistent", fields: [ "field1", ..., "fieldn" ], unique: true })` - -Creates a unique persistent index on all documents using *field1*, ... *fieldn* -as attribute paths. At least one attribute path has to be given. The index will -be non-sparse by default. - -All documents in the collection must differ in terms of the indexed -attributes. 
Creating a new document or updating an existing document will -will fail if the attribute uniqueness is violated. - -To create a sparse unique index, set the *sparse* attribute to `true`: - -`collection.ensureIndex({ type: "persistent", fields: [ "field1", ..., "fieldn" ], unique: true, sparse: true })` - -In a sparse index all documents will be excluded from the index that do not -contain at least one of the specified index attributes or that have a value -of `null` in any of the specified index attributes. Such documents will -not be indexed, and not be taken into account for uniqueness checks. - -In a non-sparse index, these documents will be indexed (for non-present -indexed attributes, a value of `null` will be used) and will be taken into -account for uniqueness checks. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureUniquePersistentSingle - @EXAMPLE_ARANGOSH_OUTPUT{ensureUniquePersistentSingle} - ~db._create("ids"); - db.ids.ensureIndex({ type: "persistent", fields: [ "myId" ], unique: true }); - db.ids.save({ "myId": 123 }); - db.ids.save({ "myId": 456 }); - db.ids.save({ "myId": 789 }); - db.ids.save({ "myId": 123 }); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - ~db._drop("ids"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureUniquePersistentSingle - - @startDocuBlockInline ensureUniquePersistentMultiColmun - @EXAMPLE_ARANGOSH_OUTPUT{ensureUniquePersistentMultiColmun} - ~db._create("ids"); - db.ids.ensureIndex({ type: "persistent", fields: [ "name.first", "name.last" ], unique: true }); - db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); - db.ids.save({ "name" : { "first" : "jens", "last": "jensen" }}); - db.ids.save({ "name" : { "first" : "hans", "last": "jensen" }}); - db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - ~db._drop("ids"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureUniquePersistentMultiColmun - - - - - - -ensures that a non-unique persistent index exists -`collection.ensureIndex({ type: "persistent", fields: [ "field1", ..., "fieldn" ] })` - -Creates a non-unique persistent index on all documents using *field1*, ... -*fieldn* as attribute paths. At least one attribute path has to be given. -The index will be non-sparse by default. - -To create a sparse unique index, set the *sparse* attribute to `true`. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensurePersistent - @EXAMPLE_ARANGOSH_OUTPUT{ensurePersistent} - ~db._create("names"); - db.names.ensureIndex({ type: "persistent", fields: [ "first" ] }); - db.names.save({ "first" : "Tim" }); - db.names.save({ "first" : "Tom" }); - db.names.save({ "first" : "John" }); - db.names.save({ "first" : "Tim" }); - db.names.save({ "first" : "Tom" }); - ~db._drop("names"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensurePersistent - - -### Query by example using a persistent index - - -constructs a query-by-example using a persistent index -`collection.byExample(example)` - -Selects all documents from the collection that match the specified example -and returns a cursor. A persistent index will be used if present. - -You can use *toArray*, *next*, or *hasNext* to access the -result. The result can be limited using the *skip* and *limit* -operator. 
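A minimal usage sketch might look like this (the `people` collection and its documents are made up for illustration):

```js
// hypothetical collection with a persistent index on "name"
db.people.ensureIndex({ type: "persistent", fields: [ "name" ] });
db.people.save({ name: "jensen", age: 30 });
db.people.save({ name: "jensen", age: 40 });

// the persistent index can be used to answer the example lookup;
// the result is capped via limit() before materializing it with toArray()
db.people.byExample({ name: "jensen" }).limit(10).toArray();
```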
- -An attribute name of the form *a.b* is interpreted as attribute path, -not as attribute. If you use - -```json -{ "a" : { "c" : 1 } } -``` - -as example, then you will find all documents, such that the attribute -*a* contains a document of the form *{c : 1 }*. For example the document - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` - -will match, but the document - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` - -will not. - -However, if you use - -```json -{ "a.c" : 1 }, -``` - -then you will find all documents, which contain a sub-document in *a* -that has an attribute *c* of value *1*. Both the following documents - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` -and - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` -will match. - -Persistent Indexes and Server Language --------------------------------------- - -The order of index entries in persistent indexes adheres to the configured -[server language](../Programs/Arangod/Global.md#default-language). -If, however, the server is restarted with a different language setting as when -the persistent index was created, not all documents may be returned anymore and -the sort order of those which are returned can be wrong (whenever the persistent -index is consulted). - -To fix persistent indexes after a language change, delete and re-create them. -Skiplist indexes are not affected, because they are not persisted and -automatically rebuilt on every server start. diff --git a/Documentation/Books/Manual/Indexing/README.md b/Documentation/Books/Manual/Indexing/README.md deleted file mode 100644 index e547b894a477..000000000000 --- a/Documentation/Books/Manual/Indexing/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Handling Indexes -================ - -This is an introduction to ArangoDB's interface for indexes in general. -There are special sections for - -- [Index Basics](IndexBasics.md): Introduction to all index types -- [Which index to use when](WhichIndex.md): Index type and options adviser -- [Index Utilization](IndexUtilization.md): How ArangoDB uses indexes -- [Working with Indexes](WorkingWithIndexes.md): How to handle indexes - programmatically using the `db` object - - [Hash Indexes](Hash.md) - - [Skiplists](Skiplist.md) - - [Persistent Indexes](Persistent.md) - - [TTL Indexes](Ttl.md) - - [Fulltext Indexes](Fulltext.md) - - [Geo-spatial Indexes](Geo.md) - - [Vertex-centric Indexes](VertexCentric.md) diff --git a/Documentation/Books/Manual/Indexing/Skiplist.md b/Documentation/Books/Manual/Indexing/Skiplist.md deleted file mode 100644 index 456eb002a42d..000000000000 --- a/Documentation/Books/Manual/Indexing/Skiplist.md +++ /dev/null @@ -1,213 +0,0 @@ -Skiplists -========= - -Introduction to Skiplist Indexes --------------------------------- - -This is an introduction to ArangoDB's skiplists. - -It is possible to define a skiplist index on one or more attributes (or paths) -of documents. This skiplist is then used in queries to locate documents -within a given range. If the skiplist is declared unique, then no two documents are -allowed to have the same set of attribute values. - -Creating a new document or updating a document will fail if the uniqueness is violated. -If the skiplist index is declared sparse, a document will be excluded from the index and no -uniqueness checks will be performed if any index attribute value is not set or has a value -of `null`. 
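The following sketch illustrates this sparse behavior (the `emails` collection is made up for illustration): documents that do not set the indexed attribute, or that set it to `null`, stay out of the index and therefore do not participate in uniqueness checks:

```js
// hypothetical collection with a sparse, unique skiplist index on "address"
db._create("emails");
db.emails.ensureIndex({ type: "skiplist", fields: [ "address" ], unique: true, sparse: true });

db.emails.save({ address: "jane@example.com" }); // indexed
db.emails.save({ note: "no address set" });      // attribute missing: not indexed, no uniqueness check
db.emails.save({ address: null });               // null value: not indexed either
db.emails.save({ address: "jane@example.com" }); // fails with a unique constraint violation
```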
- -Accessing Skiplist Indexes from the Shell ------------------------------------------ - -### Unique Skiplist Index - - - -Ensures that a unique skiplist index exists: -`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ], unique: true })` - -Creates a unique skiplist index on all documents using *field1*, ... *fieldn* -as attribute paths. At least one attribute path has to be given. The index will -be non-sparse by default. - -All documents in the collection must differ in terms of the indexed -attributes. Creating a new document or updating an existing document will -fail if the attribute uniqueness is violated. - -To create a sparse unique index, set the *sparse* attribute to `true`: - -`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ], unique: true, sparse: true })` - -In a sparse index all documents will be excluded from the index that do not -contain at least one of the specified index attributes or that have a value -of `null` in any of the specified index attributes. Such documents will -not be indexed, and not be taken into account for uniqueness checks. - -In a non-sparse index, these documents will be indexed (for non-present -indexed attributes, a value of `null` will be used) and will be taken into -account for uniqueness checks. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureUniqueSkiplistSingle - @EXAMPLE_ARANGOSH_OUTPUT{ensureUniqueSkiplistSingle} - ~db._create("ids"); - db.ids.ensureIndex({ type: "skiplist", fields: [ "myId" ], unique: true }); - db.ids.save({ "myId": 123 }); - db.ids.save({ "myId": 456 }); - db.ids.save({ "myId": 789 }); - db.ids.save({ "myId": 123 }); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - ~db._drop("ids"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureUniqueSkiplistSingle - - @startDocuBlockInline ensureUniqueSkiplistMultiColumn - @EXAMPLE_ARANGOSH_OUTPUT{ensureUniqueSkiplistMultiColumn} - ~db._create("ids"); - db.ids.ensureIndex({ type: "skiplist", fields: [ "name.first", "name.last" ], unique: true }); - db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); - db.ids.save({ "name" : { "first" : "jens", "last": "jensen" }}); - db.ids.save({ "name" : { "first" : "hans", "last": "jensen" }}); - db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); // xpError(ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) - ~db._drop("ids"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureUniqueSkiplistMultiColumn - - -### Non-unique Skiplist Index - - - -Ensures that a non-unique skiplist index exists: -`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ] })` - -Creates a non-unique skiplist index on all documents using *field1*, ... -*fieldn* as attribute paths. At least one attribute path has to be given. -The index will be non-sparse by default. - -To create a sparse non-unique index, set the *sparse* attribute to `true`. - -`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ], sparse: true })` - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. 
- - @startDocuBlockInline ensureSkiplist - @EXAMPLE_ARANGOSH_OUTPUT{ensureSkiplist} - ~db._create("names"); - db.names.ensureIndex({ type: "skiplist", fields: [ "first" ] }); - db.names.save({ "first" : "Tim" }); - db.names.save({ "first" : "Tom" }); - db.names.save({ "first" : "John" }); - db.names.save({ "first" : "Tim" }); - db.names.save({ "first" : "Tom" }); - ~db._drop("names"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureSkiplist - -### Skiplist Array Index - -Ensures that a skiplist array index exists (non-unique): -`collection.ensureIndex({ type: "skiplist", fields: [ "field1[*]", ..., "fieldn[*]" ] })` - -Creates a non-unique skiplist array index for the individual elements of the array -attributes field1[*], ... fieldn[*] found in the documents. At least -one attribute path has to be given. The index always treats the indexed arrays as -sparse. - -It is possible to combine array indexing with standard indexing: -`collection.ensureIndex({ type: "skiplist", fields: [ "field1[*]", "field2" ] })` - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureSkiplistArray - @EXAMPLE_ARANGOSH_OUTPUT{ensureSkiplistArray} - ~db._create("test"); - db.test.ensureIndex({ type: "skiplist", fields: [ "a[*]" ] }); - db.test.save({ a : [ 1, 2 ] }); - db.test.save({ a : [ 1, 3 ] }); - db.test.save({ a : null }); - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureSkiplistArray - -### Query by example using a skiplist index - -Constructs a query-by-example using a skiplist index: -`collection.byExample(example)` - -Selects all documents from the collection that match the specified example -and returns a cursor. A skiplist index will be used if present. - -You can use *toArray*, *next*, or *hasNext* to access the -result. The result can be limited using the *skip* and *limit* -operator. - -An attribute name of the form *a.b* is interpreted as attribute path, -not as attribute. If you use - -```json -{ "a" : { "c" : 1 } } -``` - -as example, then you will find all documents, such that the attribute -*a* contains a document of the form *{c : 1 }*. For example the document - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` - -will match, but the document - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` - -will not. - -However, if you use - -```json -{ "a.c" : 1 }, -``` - -then you will find all documents, which contain a sub-document in *a* -that has an attribute *c* of value *1*. Both the following documents - -```json -{ "a" : { "c" : 1 }, "b" : 1 } -``` -and - -```json -{ "a" : { "c" : 1, "b" : 1 } } -``` -will match. - - -Creating Skiplist Index in Background -------------------------------------- - -{% hint 'info' %} -This section only applies to the *rocksdb* storage engine -{% endhint %} - -Creating new indexes is by default done under an exclusive collection lock. This means -that the collection (or the respective shards) are not available as long as the index -is created. This "foreground" index creation can be undesirable, if you have to perform it -on a live system without a dedicated maintenance window. - -Indexes can also be created in "background", not using an exclusive lock during the creation. -The collection remains available, other CRUD operations can run on the collection while the index is created. -This can be achieved by using the *inBackground* option. 
- -To create a Skiplist index in the background in *arangosh* just specify `inBackground: true`: - -```js -db.collection.ensureIndex({ type: "skiplist", fields: [ "value" ], inBackground: true }); -``` - -For more information see "Creating Indexes in Background" in the [Index basics](IndexBasics.md#) page. - diff --git a/Documentation/Books/Manual/Indexing/Ttl.md b/Documentation/Books/Manual/Indexing/Ttl.md deleted file mode 100644 index 2324c5ce8bbf..000000000000 --- a/Documentation/Books/Manual/Indexing/Ttl.md +++ /dev/null @@ -1,156 +0,0 @@ -TTL Indexes -=========== - -Introduction to TTL (time-to-live) Indexes ------------------------------------------- - -The TTL index provided by ArangoDB is used for removing expired documents -from a collection. - -The TTL index is set up by setting an `expireAfter` value and by selecting a single -document attribute which contains a reference timepoint. For each document, that -reference timepoint can then be specified as a numeric timestamp (Unix timestamp) or -a date string in format `YYYY-MM-DDTHH:MM:SS` with optional milliseconds. -All date strings will be interpreted as UTC dates. - -Documents will count as expired when wall clock time is beyond the per-document -reference timepoint value plus the index' `expireAfter` value added to it. - -### Removing documents at a fixed period after creation / update - -One use case supported by TTL indexes is to remove documents at a fixed duration -after they have been created or last updated. This requires setting up the index -with an attribute that contains the documents' creation or last-updated time. - -Let's assume the index attribute is set to "creationDate", and the `expireAfter` -attribute of the index was set to 600 seconds (10 minutes). - - db.collection.ensureIndex({ type: "ttl", fields: ["creationDate"], expireAfter: 600 }); - -Let's further assume the following document now gets inserted into the collection: - - { "creationDate" : 1550165973 } - -This document will be indexed with a reference timepoint value of `1550165973`, -which translates to the human-readable date/time `2019-02-14T17:39:33.000Z`. The document -will expire 600 seconds afterwards, which is at timestamp `1550166573` (or -`2019-02-14T17:49:33.000Z` in the human-readable version). From that point on, the -document is a candidate for being removed. - -Please note that the numeric date time values for the index attribute should be -specified in seconds since January 1st 1970 (Unix timestamp). To calculate the current -timestamp from JavaScript in this format, there is `Date.now() / 1000`, to calculate it -from an arbitrary `Date` instance, there is `Date.getTime() / 1000`. - -Alternatively, the reference timepoints can be specified as a date string in format -`YYYY-MM-DDTHH:MM:SS` with optional milliseconds. All date strings will be interpreted -as UTC dates. - -The above example document using a datestring attribute value would be - - { "creationDate" : "2019-02-14T17:39:33.000Z" } - -Now any data-modification access to the document could update the value in the document's -`creationDate` attribute to the current date/time, which would prolong the existence -of the document and keep it from being expired and removed. - -Setting a document's reference timepoint on initial insertion or updating it on every -subsequent modification of the document will not be performed by ArangoDB. Instead, it -is the tasks of client applications to set and update the reference timepoints whenever -the use case requires it. 
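A minimal sketch of such a client-side refresh, reusing the `creationDate` attribute and the TTL index from the example above, could look like this:

```js
// the client application refreshes the reference timepoint itself on every
// modification, which keeps the document from expiring
var doc = db.collection.save({ value: "some data", creationDate: Date.now() / 1000 });

// ... later, when updating the document ...
db.collection.update(doc._key, { value: "updated data", creationDate: Date.now() / 1000 });
```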
- -### Removing documents at certain points in time - -Another use case is to specify a per-document expiration/removal timepoint, and setting -the `expireAfter` attribute to a low value (e.g. 0 seconds). - -Let's assume the index attribute is set to "expireDate", and the `expireAfter` -attribute of the index was set to 0 seconds (immediately when wall clock time reaches -the value specified in `expireDate`). - - db.collection.ensureIndex({ type: "ttl", fields: ["expireDate"], expireAfter: 0 }); - -When storing the following document in the collection, it will expire at the timepoint -specified in the document itself: - - { "expireDate" : "2019-03-28T01:06:00Z" } - -As `expireAfter` was set to 0, the document will count as expired when wall clock time -has reached the timeout. - -It should be intuitive to see that the `expireDate` can be differently per document. -This allows mixing of documents with different expiration periods by calculating their -expiration dates differently in the client application. - -### Preventing documents from being removed - -In case the index attribute does not contain a numeric value nor a proper date string, -the document will not be stored in the TTL index and thus will not become a candidate -for expiration and removal. Providing either a non-numeric value or even no value for -the index attribute is a supported way to keep documents from being expired and removed. - -### Limitations - -The actual removal of expired documents will not necessarily happen immediately when -they have reached their expiration time. -Expired documents will eventually be removed by a background thread that is periodically -going through all TTL indexes and removing the expired documents. - -There is no guarantee when exactly the removal of expired documents will be carried -out, so queries may still find and return documents that have already expired. These -will eventually be removed when the background thread kicks in and has spare capacity to -remove the expired documents. It is guaranteed however that only documents which are -past their expiration time will actually be removed. - -The frequency for invoking the background removal thread can be configured using -the `--ttl.frequency` startup option. The frequency is specified in milliseconds. - -In order to avoid "random" load spikes by the background thread suddenly kicking -in and removing a lot of documents at once, the number of to-be-removed documents -per thread invocation can be capped. -The total maximum number of documents to be removed per thread invocation is -controlled by the startup option `--ttl.max-total-removes`. The maximum number of -documents in a single collection at once can be controlled by the startup option -`--ttl.max-collection-removes`. - -There can at most be one TTL index per collection. It is not recommended to rely on -TTL indexes for user-land AQL queries. This is because TTL indexes may store a transformed, -always numerical version of the index attribute value even if it was originally passed -in as a datestring. - -Please note that there is one background thread per ArangoDB database server instance -for performing the removal of expired documents of all collections in all databases. -If the number of databases and collections with TTL indexes is high and there are many -documents to remove from these, the background thread may at least temporarily lag -behind with its removal operations. 
It should eventually catch up in case the number -of to-be-removed documents per invocation is not higher than the background thread's -configured threshold values. - -Please also note that TTL indexes are designed exactly for the purpose of removing -expired documents from collections. It is *not recommended* to rely on TTL indexes -for user-land AQL queries. This is because TTL indexes internally may store a transformed, -always numerical version of the index attribute value even if it was originally passed in -as a datestring. As a result TTL indexes will likely not be used for filtering and sort -operations in user-land AQL queries. - - -Accessing TTL Indexes from the Shell -------------------------------------- - -Ensures that a TTL index exists: -`collection.ensureIndex({ type: "ttl", fields: [ "field" ], expireAfter: 600 })` - -Creates a TTL index on all documents using *field* as attribute path. Exactly -one attribute path has to be given. The index will be sparse in all cases. - -In case that the index was successfully created, an object with the index -details, including the index-identifier, is returned. - - @startDocuBlockInline ensureTtlIndex - @EXAMPLE_ARANGOSH_OUTPUT{ensureTtlIndex} - ~db._create("test"); - db.test.ensureIndex({ type: "ttl", fields: [ "creationDate" ], expireAfter: 600 }); - for (let i = 0; i < 100; ++i) { db.test.insert({ creationDate: Date.now() / 1000 }); } - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureTtlIndex diff --git a/Documentation/Books/Manual/Indexing/VertexCentric.md b/Documentation/Books/Manual/Indexing/VertexCentric.md deleted file mode 100644 index 6150fc922362..000000000000 --- a/Documentation/Books/Manual/Indexing/VertexCentric.md +++ /dev/null @@ -1,71 +0,0 @@ -Vertex Centric Indexes -====================== - -Introduction to Vertex Centric Indexes --------------------------------------- - -In ArangoDB there are special indices designed to speed up graph operations, -especially if the graph contains supernodes (vertices that have an exceptionally -high amount of connected edges). -These indices are called vertex centric indexes and can be used in addition -to the existing edge index. - -Motivation ----------- - -The idea of this index is to index a combination of a vertex, the direction and any arbitrary -set of other attributes on the edges. -To take an example, if we have an attribute called `type` on the edges, we can use an outbound -vertex-centric index on this attribute to find all edges attached to a vertex with a given `type`. -The following query example could benefit from such an index: - - FOR v, e, p IN 3..5 OUTBOUND @start GRAPH @graphName - FILTER p.edges[*].type ALL == "friend" - RETURN v - -Using the built-in edge-index ArangoDB can find the list of all edges attached to the vertex fast, -but still it has to walk through this list and check if all of them have the attribute `type == "friend"`. -Using a vertex-centric index would allow ArangoDB to find all edges for the vertex having the attribute `type == "friend"` -in the same time and can save the iteration to verify the condition. - -Index creation --------------- - -A vertex-centric can be either of the following types: - -* [Hash Index](Hash.md) -* [Skiplist Index](Skiplist.md) -* [Persistent Index](Persistent.md) - -And is created using their creation operations. -However in the list of fields used to create the index we have to include either `_from` or `_to`. -Let us again explain this by an example. 
-Assume we want to create an hash-based outbound vertex-centric index on the attribute `type`. -This can be created with the following way: - - @startDocuBlockInline ensureVertexCentricHashIndex - @EXAMPLE_ARANGOSH_OUTPUT{ensureVertexCentricHashIndex} - ~db._createEdgeCollection("collection"); - db.collection.ensureIndex({ type: "hash", fields: [ "_from", "type" ] }) - ~db._drop("collection"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock ensureVertexCentricHashIndex - -All options that are supported by the respective indexes are supported by the vertex-centric index as well. - -Index usage ------------ - -The AQL optimizer can decide to use a vertex-centric whenever suitable, however it is not guaranteed that this -index is used, the optimizer may estimate that an other index is assumed to be better. -The optimizer will consider this type of indexes on explicit filtering of `_from` respectively `_to`: - - FOR edge IN collection - FILTER edge._from == "vertices/123456" AND edge.type == "friend" - RETURN edge - -and during pattern matching queries: - - FOR v, e, p IN 3..5 OUTBOUND @start GRAPH @graphName - FILTER p.edges[*].type ALL == "friend" - RETURN v diff --git a/Documentation/Books/Manual/Indexing/WhichIndex.md b/Documentation/Books/Manual/Indexing/WhichIndex.md deleted file mode 100644 index 1769918e7766..000000000000 --- a/Documentation/Books/Manual/Indexing/WhichIndex.md +++ /dev/null @@ -1,209 +0,0 @@ -Which Index to use when -======================= - -ArangoDB automatically indexes the `_key` attribute in each collection. There -is no need to index this attribute separately. Please note that a document's -`_id` attribute is derived from the `_key` attribute, and is thus implicitly -indexed, too. - -ArangoDB will also automatically create an index on `_from` and `_to` in any -edge collection, meaning incoming and outgoing connections can be determined -efficiently. - -Index types ------------ - -Users can define additional indexes on one or multiple document attributes. -Several different index types are provided by ArangoDB. These indexes have -different usage scenarios: - -- hash index: provides quick access to individual documents if (and only if) - all indexed attributes are provided in the search query. The index will only - be used for equality comparisons. It does not support range queries and - cannot be used for sorting. - - The hash index is a good candidate if all or most queries on the indexed - attribute(s) are equality comparisons. The unique hash index provides an - amortized complexity of O(1) for insert, update, remove and lookup operations. - The non-unique hash index provides O(1) inserts, updates and removes, and - will allow looking up documents by index value with amortized O(n) complexity, - with *n* being the number of documents with that index value. - - A non-unique hash index on an optional document attribute should be declared - sparse so that it will not index documents for which the index attribute is - not set. - -- skiplist index: skiplists keep the indexed values in an order, so they can - be used for equality lookups, range queries and for sorting. For high selectivity - attributes, skiplist indexes will have a higher overhead than hash indexes. For - low selectivity attributes, skiplist indexes will be more efficient than non-unique - hash indexes. - - Additionally, skiplist indexes allow more use cases (e.g. range queries, sorting) - than hash indexes. 
Furthermore, they can be used for lookups based on a leftmost - prefix of the index attributes. - -- persistent index: a persistent index behaves much like the sorted skiplist index, - except that all index values are persisted on disk and do not need to be rebuilt - in memory when the server is restarted or the indexed collection is reloaded. - The operations in a persistent index have logarithmic complexity, but operations - have may have a higher constant factor than the operations in a skiplist index, - because the persistent index may need to make extra roundtrips to the primary - index to fetch the actual documents. - - A persistent index can be used for equality lookups, range queries and for sorting. - For high selectivity attributes, persistent indexes will have a higher overhead than - skiplist or hash indexes. - - Persistent indexes allow more use cases (e.g. range queries, sorting) than hash - indexes. Furthermore, they can be used for lookups based on a leftmost prefix of the - index attributes. In contrast to the in-memory skiplist indexes, persistent indexes - do not need to be rebuilt in-memory so they don't influence the loading time of - collections as other in-memory indexes do. - -- ttl index: the TTL index provided by ArangoDB can be used for automatically removing - expired documents from a collection. - - The TTL index is set up by setting an `expireAfter` value and by picking a single - document attribute which contains the documents' reference timepoint. Documents - are expired `expireAfter` seconds after their reference timepoint has been reached. - The documents' reference timepoint is specified as either a numeric timestamp - (Unix timestamp) or a date string in format `YYYY-MM-DDTHH:MM:SS` with optional - milliseconds. All date strings will be interpreted as UTC dates. - - For example, if `expireAfter` is set to 600 seconds (10 minutes) and the index - attribute is "creationDate" and there is the following document: - - { "creationDate" : 1550165973 } - - This document will be indexed with a creation date time value of `1550165973`, - which translates to the human-readable date `2019-02-14T17:39:33.000Z`. The document - will expire 600 seconds afterwards, which is at timestamp `1550166573` (or - `2019-02-14T17:49:33.000Z` in the human-readable version). - - The actual removal of expired documents will not necessarily happen immediately. - Expired documents will eventually removed by a background thread that is periodically - going through all TTL indexes and removing the expired documents. The frequency for - invoking this background thread can be configured using the `--ttl.frequency` - startup option. - - There is no guarantee when exactly the removal of expired documents will be carried - out, so queries may still find and return documents that have already expired. These - will eventually be removed when the background thread kicks in and has capacity to - remove the expired documents. It is guaranteed however that only documents which are - past their expiration time will actually be removed. - - Please note that the numeric date time values for the index attribute should be - specified in seconds since January 1st 1970 (Unix timestamp). To calculate the current - timestamp from JavaScript in this format, there is `Date.now() / 1000`, to calculate it - from an arbitrary Date instance, there is `Date.getTime() / 1000`. 
- - Alternatively, the index attribute values can be specified as a date string in format - `YYYY-MM-DDTHH:MM:SS` with optional milliseconds. All date strings will be interpreted - as UTC dates. - - The above example document using a date string attribute value would be - - { "creationDate" : "2019-02-14T17:39:33.000Z" } - - In case the index attribute does not contain a numeric value nor a proper date string, - the document will not be stored in the TTL index and thus will not become a candidate - for expiration and removal. Providing either a non-numeric value or even no value for - the index attribute is a supported way of keeping documents from being expired and removed. - - TTL indexes are designed exactly for the purpose of removing expired documents from - a collection. It is *not recommended* to rely on TTL indexes for user-land AQL queries. - This is because TTL indexes internally may store a transformed, always numerical version - of the index attribute value even if it was originally passed in as a datestring. As a - result TTL indexes will likely not be used for filtering and sort operations in user-land - AQL queries. - -- geo index: the geo index provided by ArangoDB allows searching for documents - within a radius around a two-dimensional earth coordinate (point), or to - find documents with are closest to a point. Document coordinates can either - be specified in two different document attributes or in a single attribute, e.g. - - { "latitude": 50.9406645, "longitude": 6.9599115 } - - or - - { "coords": [ 50.9406645, 6.9599115 ] } - - Geo indexes will be invoked via special functions or AQL optimization. The - optimization can be triggered when a collection with geo index is enumerated - and a SORT or FILTER statement is used in conjunction with the distance - function. - -- fulltext index: a fulltext index can be used to index all words contained in - a specific attribute of all documents in a collection. Only words with a - (specifiable) minimum length are indexed. Word tokenization is done using - the word boundary analysis provided by libicu, which is taking into account - the selected language provided at server start. - - The index supports complete match queries (full words) and prefix queries. - Fulltext indexes will only be invoked via special functions. - -Sparse vs. non-sparse indexes ------------------------------ - -Hash indexes and skiplist indexes can optionally be created sparse. A sparse index -does not contain documents for which at least one of the index attribute is not set -or contains a value of `null`. - -As such documents are excluded from sparse indexes, they may contain fewer documents than -their non-sparse counterparts. This enables faster indexing and can lead to reduced memory -usage in case the indexed attribute does occur only in some, but not all documents of the -collection. Sparse indexes will also reduce the number of collisions in non-unique hash -indexes in case non-existing or optional attributes are indexed. 
- -In order to create a sparse index, an object with the attribute `sparse` can be added to -the index creation commands: - -```js -db.collection.ensureIndex({ type: "hash", fields: [ "attributeName" ], sparse: true }); -db.collection.ensureIndex({ type: "hash", fields: [ "attributeName1", "attributeName2" ], sparse: true }); -db.collection.ensureIndex({ type: "hash", fields: [ "attributeName" ], unique: true, sparse: true }); -db.collection.ensureIndex({ type: "hash", fields: [ "attributeName1", "attributeName2" ], unique: true, sparse: true }); - -db.collection.ensureIndex({ type: "skiplist", fields: [ "attributeName" ], sparse: true }); -db.collection.ensureIndex({ type: "skiplist", fields: [ "attributeName1", "attributeName2" ], sparse: true }); -db.collection.ensureIndex({ type: "skiplist", fields: [ "attributeName" ], unique: true, sparse: true }); -db.collection.ensureIndex({ type: "skiplist", fields: [ "attributeName1", "attributeName2" ], unique: true, sparse: true }); -``` - -When not explicitly set, the `sparse` attribute defaults to `false` for new indexes. -Other indexes than hash and skiplist do not support sparsity. - -As sparse indexes may exclude some documents from the collection, they cannot be used for -all types of queries. Sparse hash indexes cannot be used to find documents for which at -least one of the indexed attributes has a value of `null`. For example, the following AQL -query cannot use a sparse index, even if one was created on attribute `attr`: - - FOR doc In collection - FILTER doc.attr == null - RETURN doc - -If the lookup value is non-constant, a sparse index may or may not be used, depending on -the other types of conditions in the query. If the optimizer can safely determine that -the lookup value cannot be `null`, a sparse index may be used. When uncertain, the optimizer -will not make use of a sparse index in a query in order to produce correct results. - -For example, the following queries cannot use a sparse index on `attr` because the optimizer -will not know beforehand whether the values which are compared to `doc.attr` will include `null`: - - FOR doc In collection - FILTER doc.attr == SOME_FUNCTION(...) - RETURN doc - - FOR other IN otherCollection - FOR doc In collection - FILTER doc.attr == other.attr - RETURN doc - -Sparse skiplist indexes can be used for sorting if the optimizer can safely detect that the -index range does not include `null` for any of the index attributes. - -Note that if you intend to use [joins](../../AQL/Examples/Join.html) it may be clever -to use non-sparsity and maybe even uniqueness for that attribute, else all items containing -the `null` value will match against each other and thus produce large results. - diff --git a/Documentation/Books/Manual/Indexing/WorkingWithIndexes.md b/Documentation/Books/Manual/Indexing/WorkingWithIndexes.md deleted file mode 100644 index 427015876b53..000000000000 --- a/Documentation/Books/Manual/Indexing/WorkingWithIndexes.md +++ /dev/null @@ -1,275 +0,0 @@ -Working with Indexes -==================== - -Learn how to use different indexes efficiently by going through the -[ArangoDB Performance Course](https://www.arangodb.com/arangodb-performance-course/). - -Index Identifiers and Handles ------------------------------ - -An *index handle* uniquely identifies an index in the database. It is a string and -consists of the collection name and an *index identifier* separated by a `/`. The -index identifier part is a numeric value that is auto-generated by ArangoDB. 
- -A specific index of a collection can be accessed using its *index handle* or -*index identifier* as follows: - -```js -db.collection.index(""); -db.collection.index(""); -db._index(""); -``` - -For example: Assume that the index handle, which is stored in the `_id` -attribute of the index, is `demo/362549736` and the index was created in a collection -named `demo`. Then this index can be accessed as: - -```js -db.demo.index("demo/362549736"); -``` - -Because the index handle is unique within the database, you can leave out the -*collection* and use the shortcut: - -```js -db._index("demo/362549736"); -``` - -An index may also be looked up by its name. Since names are only unique within -a collection, rather than within the database, the lookup must also include the -collection name. - -```js -db._index("demo/primary") -db.demo.index("primary") -``` - -Collection Methods ------------------- - -### Listing all indexes of a collection - - - -returns information about the indexes -`getIndexes()` - -Returns an array of all indexes defined for the collection. -Since ArangoDB 3.4, `indexes()` is an alias for `getIndexes()`. - -Note that `_key` implicitly has an index assigned to it. - - @startDocuBlockInline collectionGetIndexes - @EXAMPLE_ARANGOSH_OUTPUT{collectionGetIndexes} - ~db._create("test"); - ~db.test.ensureUniqueSkiplist("skiplistAttribute"); - ~db.test.ensureUniqueSkiplist("skiplistUniqueAttribute"); - |~db.test.ensureHashIndex("hashListAttribute", - "hashListSecondAttribute.subAttribute"); - db.test.getIndexes(); - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionGetIndexes - - -### Creating an index -Indexes should be created using the general method *ensureIndex*. This -method obsoletes the specialized index-specific methods *ensureHashIndex*, -*ensureSkiplist*, *ensureUniqueConstraint* etc. - - - - -ensures that an index exists -`collection.ensureIndex(index-description)` - -Ensures that an index according to the *index-description* exists. A -new index will be created if none exists with the given description. - -The *index-description* must contain at least a *type* attribute. -Other attributes may be necessary, depending on the index type. - -**type** can be one of the following values: -- *hash*: hash index -- *skiplist*: skiplist index -- *fulltext*: fulltext index -- *geo*: geo index, with _one_ or _two_ attributes - -**name** can be a string. Index names are subject to the same character -restrictions as collection names. If omitted, a name will be auto-generated so -that it is unique with respect to the collection, e.g. `idx_832910498`. - -**sparse** can be *true* or *false*. - -For *hash*, and *skiplist* the sparsity can be controlled, *fulltext* and *geo* -are [sparse](WhichIndex.md) by definition. - -**unique** can be *true* or *false* and is supported by *hash* or *skiplist* - -Calling this method returns an index object. Whether or not the index -object existed before the call is indicated in the return attribute -*isNewlyCreated*. - -**deduplicate** can be *true* or *false* and is supported by array indexes of -type *hash* or *skiplist*. It controls whether inserting duplicate index values -from the same document into a unique array index will lead to a unique constraint -error or not. The default value is *true*, so only a single instance of each -non-unique index value will be inserted into the index per document. 
Trying to -insert a value into the index that already exists in the index will always fail, -regardless of the value of this attribute. - - -**Examples** - - - @startDocuBlockInline collectionEnsureIndex - @EXAMPLE_ARANGOSH_OUTPUT{collectionEnsureIndex} - ~db._create("test"); - db.test.ensureIndex({ type: "hash", fields: [ "a" ], sparse: true }); - db.test.ensureIndex({ type: "hash", fields: [ "a", "b" ], unique: true }); - ~db._drop("test"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock collectionEnsureIndex - - - -### Dropping an index via a collection handle - - - -drops an index -`collection.dropIndex(index)` - -Drops the index. If the index does not exist, then *false* is -returned. If the index existed and was dropped, then *true* is -returned. Note that you cannot drop some special indexes (e.g. the primary -index of a collection or the edge index of an edge collection). - -`collection.dropIndex(index-handle)` - -Same as above. Instead of an index an index handle can be given. - - @startDocuBlockInline col_dropIndex - @EXAMPLE_ARANGOSH_OUTPUT{col_dropIndex} - ~db._create("example"); - db.example.ensureSkiplist("a", "b"); - var indexInfo = db.example.getIndexes(); - indexInfo; - db.example.dropIndex(indexInfo[0]) - db.example.dropIndex(indexInfo[1].id) - indexInfo = db.example.getIndexes(); - ~db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock col_dropIndex - - -### Load Indexes into Memory - - - -Loads all indexes of this collection into Memory. -`collection.loadIndexesIntoMemory()` - -This function tries to cache all index entries -of this collection into the main memory. -Therefore it iterates over all indexes of the collection -and stores the indexed values, not the entire document data, -in memory. -All lookups that could be found in the cache are much faster -than lookups not stored in the cache so you get a nice performance boost. -It is also guaranteed that the cache is consistent with the stored data. - -For the time being this function is only useful on RocksDB storage engine, -as in MMFiles engine all indexes are in memory anyways. - -On RocksDB this function honors all memory limits, if the indexes you want -to load are smaller than your memory limit this function guarantees that most -index values are cached. -If the index is larger than your memory limit this function will fill up values -up to this limit and for the time being there is no way to control which indexes -of the collection should have priority over others. - - @startDocuBlockInline LoadIndexesIntoMemory - @EXAMPLE_ARANGOSH_OUTPUT{loadIndexesIntoMemory} - ~db._drop("example"); - ~db._createEdgeCollection("example"); - db.example.loadIndexesIntoMemory(); - ~db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock LoadIndexesIntoMemory - -Database Methods ----------------- - -### Fetching an index by handle - - - -finds an index -`db._index(index-handle)` - -Returns the index with *index-handle* or null if no such index exists. - - @startDocuBlockInline IndexHandle - @EXAMPLE_ARANGOSH_OUTPUT{IndexHandle} - ~db._create("example"); - db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); - var indexInfo = db.example.getIndexes().map(function(x) { return x.id; }); - indexInfo; - db._index(indexInfo[0]) - db._index(indexInfo[1]) - ~db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock IndexHandle - - -### Dropping an index via a database handle - - - -drops an index -`db._dropIndex(index)` - -Drops the *index*. 
If the index does not exist, then *false* is -returned. If the index existed and was dropped, then *true* is -returned. - -`db._dropIndex(index-handle)` - -Drops the index with *index-handle*. - - @startDocuBlockInline dropIndex - @EXAMPLE_ARANGOSH_OUTPUT{dropIndex} - ~db._create("example"); - db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); - var indexInfo = db.example.getIndexes(); - indexInfo; - db._dropIndex(indexInfo[0]) - db._dropIndex(indexInfo[1].id) - indexInfo = db.example.getIndexes(); - ~db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock dropIndex - - -### Revalidating whether an index is used - - - -finds an index - -So you've created an index, and since its maintainance isn't for free, -you definitely want to know whether your query can utilize it. - -You can use explain to verify whether **skiplists** or **hash indexes** are -used (if you omit `colors: false` you will get nice colors in ArangoShell): - - @startDocuBlockInline IndexVerify - @EXAMPLE_ARANGOSH_OUTPUT{IndexVerify} - ~db._create("example"); - var explain = require("@arangodb/aql/explainer").explain; - db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); - explain("FOR doc IN example FILTER doc.a < 23 RETURN doc", {colors:false}); - ~db._drop("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock IndexVerify diff --git a/Documentation/Books/Manual/Installation/Compiling.md b/Documentation/Books/Manual/Installation/Compiling.md deleted file mode 100644 index a72fba4352b6..000000000000 --- a/Documentation/Books/Manual/Installation/Compiling.md +++ /dev/null @@ -1,18 +0,0 @@ -Compiling ArangoDB from Source -=============================== - -ArangoDB can be compiled directly from source. It will compile on most Linux and -macOS systems, as well as on Windows. - -We assume that you use the GNU C/C++ compiler or clang/clang++ to compile the -source. ArangoDB has been tested with these compilers, but should be able to -compile with any Posix-compliant, C++14-enabled compiler. - -By default, cloning the GitHub repository will checkout the _devel_ branch. -This branch contains the development version of the ArangoDB. Use this branch if -you want to make changes to the ArangoDB source. - -On Windows you first [need to allow and enable symlinks for your user](https://github.com/git-for-windows/git/wiki/Symbolic-Links#allowing-non-administrators-to-create-symbolic-links). - -Please checkout the [cookbook](../../Cookbook/Compiling/index.html) on how to -compile ArangoDB. diff --git a/Documentation/Books/Manual/Installation/Linux.md b/Documentation/Books/Manual/Installation/Linux.md deleted file mode 100644 index a5b7a9687312..000000000000 --- a/Documentation/Books/Manual/Installation/Linux.md +++ /dev/null @@ -1,63 +0,0 @@ -Installing ArangoDB on Linux -============================ - -To install ArangoDB on Linux: - -1. Visit the official [Download](https://www.arangodb.com/download) page of the - ArangoDB web site and download the correct package for your Linux distribution. - You can find binary packages for the most common distributions there. Linux Mint: - please use the corresponding Ubuntu or Debian packages. -2. Follow the installation instructions on the _Download_ page to use your - favorite package manager for the major distributions. After setting up the ArangoDB - repository you can easily install ArangoDB using _yum_, _aptitude_, _urpmi_ or _zypper_. 
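For illustration, the final step on a Debian- or Ubuntu-based system might look like the following sketch, assuming the ArangoDB repository has already been set up as described on the _Download_ page (the package name `arangodb3` refers to the Community Edition packages; the exact, distribution-specific commands are shown on the _Download_ page):

```
# example only - follow the distribution-specific instructions
# from the Download page
sudo apt-get update
sudo apt-get install arangodb3
```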
- -{% hint 'info' %} -In addition to installation packages (distribution dependent) a `tar.gz` archive -is available starting from version 3.4.0. -{% endhint %} - -After installation, you may start ArangoDB in several ways. The exact start-up command -depends on your Linux distribution, as well as on the type of ArangoDB deployment you -are interested in (_Single Server_, _Master-Slave_, _Active Failover_, _Cluster_, _DC2DC_). - -Please refer to the [_Deployment_](../Deployment/README.md) chapter for details. - -Securing your Installation --------------------------- - -### Debian / Ubuntu - -Debian based packages will ask for a password during installation. - -#### Securing Unattended Installations on Debian - -For unattended installations, you can set the password using the -[debconf helpers](http://www.microhowto.info/howto/perform_an_unattended_installation_of_a_debian_package.html): - -``` -echo arangodb3 arangodb3/password password NEWPASSWORD | debconf-set-selections -echo arangodb3 arangodb3/password_again password NEWPASSWORD | debconf-set-selections -``` - -The commands above should be executed prior to the installation. - -### Red-Hat / CentOS - -Red-Hat based packages will set a random password during installation. The generated -random password is printed during the installation. Please write it down somewhere, -or change it to a password of your choice by executing: - -``` -ARANGODB_DEFAULT_ROOT_PASSWORD=NEWPASSWORD arango-secure-installation -``` - -The command should be executed after the installation. - -### Other Distributions - -For other distributions run `arango-secure-installation` to set a _root_ password. - -{% hint 'danger' %} -Please be aware that running `arango-secure-installation` on your ArangoDB Server will remove -all current database users but root. -{% endhint %} diff --git a/Documentation/Books/Manual/Installation/LinuxOSConfiguration.md b/Documentation/Books/Manual/Installation/LinuxOSConfiguration.md deleted file mode 100644 index bc5a5ddeb7d6..000000000000 --- a/Documentation/Books/Manual/Installation/LinuxOSConfiguration.md +++ /dev/null @@ -1,150 +0,0 @@ -Linux Operating System Configuration -==================================== - -{% hint 'tip' %} -The most important suggestions listed in this section can be -easily applied by making use of a script. Please refer to the page -[Linux OS Tuning Script Examples](LinuxOSTuningScripts.md) for -ready-to-use examples. -{% endhint %} - -File Systems ------------- - -We recommend to **not** use BTRFS on linux, as it is known to not work -well in conjunction with ArangoDB. We experienced that ArangoDB -facing latency issues on accessing its database files on BTRFS -partitions. In conjunction with BTRFS and AUFS we also saw data loss -on restart. - -Virtual Memory Page Sizes --------------------------- - -By default, ArangoDB uses Jemalloc as the memory allocator. Jemalloc does a good -job of reducing virtual memory fragmentation, especially for long-running -processes. Unfortunately, some OS configurations can interfere with Jemalloc's -ability to function properly. Specifically, Linux's "transparent hugepages", -Windows' "large pages" and other similar features sometimes prevent Jemalloc -from returning unused memory to the operating system and result in unnecessarily -high memory use. Therefore, we recommend disabling these features when using -Jemalloc with ArangoDB. Please consult your operating system's documentation for -how to do this. 
- -Execute: - -``` -sudo bash -c "echo madvise >/sys/kernel/mm/transparent_hugepage/enabled" -sudo bash -c "echo madvise >/sys/kernel/mm/transparent_hugepage/defrag" -``` - -before executing `arangod`. - -Swap Space ----------- - -It is recommended to assign swap space for a server that is running arangod. -Configuring swap space can prevent the operating system's OOM killer from -killing ArangoDB too eagerly on Linux. - -### Over-Commit Memory - -The recommended kernel setting for `overcommit_memory` for both MMFiles and -RocksDB storage engine is 0 or 1. The kernel default is 0. - -You can set it as follows before executing `arangod`: - -``` -sudo bash -c "echo 0 >/proc/sys/vm/overcommit_memory" -``` - -From [www.kernel.org](https://www.kernel.org/doc/Documentation/sysctl/vm.txt): - -- When this flag is 0, the kernel attempts to estimate the amount - of free memory left when userspace requests more memory. - -- When this flag is 1, the kernel pretends there is always enough - memory until it actually runs out. - -- When this flag is 2, the kernel uses a "never overcommit" - policy that attempts to prevent any overcommit of memory. - -### Zone Reclaim - -Execute - -``` -sudo bash -c "echo 0 >/proc/sys/vm/zone_reclaim_mode" -``` - -before executing `arangod`. - -From [www.kernel.org](https://www.kernel.org/doc/Documentation/sysctl/vm.txt): - -This is value ORed together of - -- 1 = Zone reclaim on -- 2 = Zone reclaim writes dirty pages out -- 4 = Zone reclaim swaps pages - -NUMA ----- - -Multi-processor systems often have non-uniform Access Memory (NUMA). ArangoDB -should be started with interleave on such system. This can be achieved using - -``` -numactl --interleave=all arangod ... -``` - -Max Memory Mappings -------------------- - -Linux kernels by default restrict the maximum number of memory mappings of a -single process to about 64K mappings. While this value is sufficient for most -workloads, it may be too low for a process that has lots of parallel threads -that all require their own memory mappings. In this case all the threads' -memory mappings will be accounted to the single arangod process, and the -maximum number of 64K mappings may be reached. When the maximum number of -mappings is reached, calls to mmap will fail, so the process will think no -more memory is available although there may be plenty of RAM left. - -To avoid this scenario, it is recommended to raise the default value for the -maximum number of memory mappings to a sufficiently high value. As a rule of -thumb, one could use 8 times the number of available cores times 8,000. - -For a 32 core server, a good rule-of-thumb value thus would be 2,048,000 -(32 * 8 * 8000). For certain workloads, it may be sensible to use even a higher -value for the number of memory mappings. - -To set the value once, use the following command before starting arangod: - -``` -sudo bash -c "sysctl -w 'vm.max_map_count=2048000'" -``` - -To make the settings durable, it will be necessary to store the adjusted -settings in /etc/sysctl.conf or other places that the operating system is -looking at. - -Environment Variables ---------------------- - -It is recommended to set the environment variable `GLIBCXX_FORCE_NEW` to 1 on -systems that use glibc++ in order to disable the memory pooling built into -glibc++. That memory pooling is unnecessary because Jemalloc will already do -memory pooling. - -Execute - -``` -export GLIBCXX_FORCE_NEW=1 -``` - -before starting `arangod`. 
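Note that the `sysctl -w` and `echo ... > /proc/sys/...` commands shown above only last until the next reboot. As mentioned in the _Max Memory Mappings_ section, the adjusted values have to be stored in `/etc/sysctl.conf` (or a file below `/etc/sysctl.d/`) to make them permanent. A minimal sketch using the example values from this page (the file name is arbitrary):

```
# /etc/sysctl.d/99-arangodb.conf - example values taken from this page
vm.max_map_count = 2048000
vm.overcommit_memory = 0
vm.zone_reclaim_mode = 0
```

The transparent hugepage settings are not sysctl parameters; they can be persisted for example via an init script as shown in [Linux OS Tuning Script Examples](LinuxOSTuningScripts.md).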
- -32bit ------ - -While it is possible to compile ArangoDB on 32bit system, this is not a -recommended environment. 64bit systems can address a significantly bigger -memory region. diff --git a/Documentation/Books/Manual/Installation/LinuxOSTuningScripts.md b/Documentation/Books/Manual/Installation/LinuxOSTuningScripts.md deleted file mode 100644 index df8686e75d37..000000000000 --- a/Documentation/Books/Manual/Installation/LinuxOSTuningScripts.md +++ /dev/null @@ -1,132 +0,0 @@ -Linux OS Tuning Script Examples -=============================== - -The most important suggestions listed in the section -[Linux Operating System Configuration](LinuxOSConfiguration.md) -can be easily applied by making use of a script and _init.d_. - -This page includes script examples that can be used to tune the -operating system (OS) in case you are using _Debian_ or _CentOS_, -along with instructions on how to install the scripts. - -{% hint 'warning' %} -It is important that the script is set up in a way that it gets -executed even after the machine reboots. Instructions on how to -configure your system so that the script executes during the -boot process can be found below. -{% endhint %} - -Debian ------- - -**Script:** - -```bash -#!/bin/bash - -### BEGIN INIT INFO -# Provides: arangodb-memory-configuration -# Required-Start: -# Required-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Set arangodb kernel parameters -# Description: Set arangodb kernel parameters -### END INIT INFO - -# 1 - Raise the vm map count value -sudo sysctl -w "vm.max_map_count=2048000" - -# 2 - Disable Transparent Huge Pages -sudo bash -c "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled" -sudo bash -c "echo madvise > /sys/kernel/mm/transparent_hugepage/defrag" - -# 3 - Set the virtual memory accounting mode -sudo bash -c "echo 0 > /proc/sys/vm/overcommit_memory" -``` - -**Installation Instructions:** - -1. Create the file inside the `/etc/init.d/` directory, e.g. - - `/etc/init.d/arangodb-os-optimization` - -2. Set correct permission, to mark the file executable: - - `sudo chmod 755 /etc/init.d/arangodb-os-optimization` - -3. On Ubuntu, use the following command to configure your system - to execute the script during the boot process: - - `sudo update-rc.d arangodb-os-optimization defaults` - -**Note:** - -You might need the package _sysfsutils_. If this is the case, -please install it via: - -`sudo apt install sysfsutils` - -**Important:** - -To optimize the OS "now", without having to restart the system, -the script **must** also be directly executed once. - -CentOS ------- - -**Script:** - -```bash -#!/bin/bash - -### BEGIN INIT INFO -# Provides: arangodb-memory-configuration -# Required-Start: -# Required-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Set arangodb kernel parameters -# Description: Set arangodb kernel parameters -### END INIT INFO - -# 1 - Raise the vm map count value -sysctl -w "vm.max_map_count=2048000" - -# 2 - Disable Transparent Huge Pages -bash -c "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled" -bash -c "echo madvise > /sys/kernel/mm/transparent_hugepage/defrag" - -# 3 - Set the virtual memory accounting mode -bash -c "echo 0 > /proc/sys/vm/overcommit_memory" -``` - -**Installation Instructions:** - -1. Create the file inside the `/etc/init.d/` directory. e.g. - - `/etc/init.d/arangodb-os-optimization` - -2. Set correct permission, to mark the file executable. 
As _root_: - - `chmod 755 /etc/init.d/arangodb-os-optimization` - -3. On CentOS/RedHat, use the following commands to configure your system - to execute the script during the boot process. As _root_: - - ``` - chkconfig --add /etc/init.d/arangodb-os-optimization` - chkconfig arangodb-os-optimization on - ``` - -**Note:** - -You might need the package _sysfsutils_. If this is the case, -please install it, as _root_, via: - -`yum install sysfsutils` - -**Important:** - -To optimize the OS "now", without having to restart the system, -the script **must** also be directly executed once. diff --git a/Documentation/Books/Manual/Installation/MacOSX.md b/Documentation/Books/Manual/Installation/MacOSX.md deleted file mode 100644 index 8eddc666f38d..000000000000 --- a/Documentation/Books/Manual/Installation/MacOSX.md +++ /dev/null @@ -1,125 +0,0 @@ -Installing ArangoDB on macOS -============================ - -ArangoDB under macOS can be installed via: - -1. [Homebrew](#homebrew) -2. [_DMG_ Package](#package-installation) -3. [_tar.gz_ Archive](#installing-using-the-archive) (starting from v3.4.0) - -Homebrew --------- - -{% hint 'info' %} -When installing ArangoDB via the macOS package manager Homebrew, -only the Community Edition is available. -{% endhint %} - -{% hint 'warning' %} -The Homebrew installation is updated a few days after the -official release of a new version. -{% endhint %} - -If you are using [_homebrew_](http://brew.sh/), -then you can install the latest released stable version of ArangoDB using *brew* as follows: - -``` -brew install arangodb -``` - -This will install the current stable version of ArangoDB and all -dependencies within your Homebrew tree. Note that the server will be -installed as: - -``` -/usr/local/Cellar/arangodb/3.4.0/sbin/arangod -``` - -You can start the server by running the command `/usr/local/Cellar/arangodb/3.4.0/sbin/arangod &`. - -Configuration file is located at - - /usr/local/etc/arangodb3/arangod.conf - -The ArangoDB shell will be installed as: - -``` -/usr/local/Cellar/arangodb/3.4.0/bin/arangosh -``` - -You can uninstall ArangoDB using: - -``` -brew uninstall arangodb -``` - -However, in case you started ArangoDB using the _launchctl_, you -need to unload it before uninstalling the server: - -``` -launchctl unload ~/Library/LaunchAgents/homebrew.mxcl.arangodb.plist -``` - -Then remove the LaunchAgent: - -``` -rm ~/Library/LaunchAgents/homebrew.mxcl.arangodb.plist -``` - -{% hint 'tip' %} -If the latest ArangoDB version is not shown in Homebrew, you -also need to update Homebrew executing the command `brew update`. -{% endhint %} - -### Known issues - -- The ArangoDB Starter is not included in v3.3.x, but shipped with all 3.4.x versions. -- The Commandline argument parsing does not accept blanks in filenames; the CLI version below does. -- If you need to change server endpoint while starting _homebrew_ version, you can edit arangod.conf - file and uncomment line with endpoint needed, e.g.: - - [server] - endpoint = tcp://0.0.0.0:8529 - -Package Installation --------------------- - -We provide a command-line app called *ArangoDB-CLI*. -Visit the official [Download](https://www.arangodb.com/download) page of the -ArangoDB website and download the *DMG* Package for macOS. - -You can install the application in your application folder. - -Starting the application will start the server and open a terminal window -showing you the log-file. 
- - ArangoDB server has been started - - The database directory is located at - '/Users//Library/ArangoDB/var/lib/arangodb3' - - The log file is located at - '/Users//Library/ArangoDB/var/log/arangodb3/arangod.log' - - You can access the server using a browser at 'http://127.0.0.1:8529/' - or start the ArangoDB shell - '/Applications/ArangoDB3-CLI.app/Contents/Resources/arangosh' - - Switching to log-file now, killing this windows will NOT stop the server. - - - 2018-03-16T09:37:01Z [13373] INFO ArangoDB (version 3.3.4 [darwin]) is ready for business. Have fun! - -Note that it is possible to install both, the _homebrew_ version and the command-line -app. You should, however, edit the configuration files of one version and change -the port used. - -Installing using the archive ----------------------------- - -Starting from v3.4.0 a _tar.gz_ package is also available for macOS. - -Visit the official [Download](https://www.arangodb.com/download) page of the ArangoDB -website and download the _tar.gz_ archive for macOS. - -To install, just extract the archive. diff --git a/Documentation/Books/Manual/Installation/README.md b/Documentation/Books/Manual/Installation/README.md deleted file mode 100644 index 3846f603e4ee..000000000000 --- a/Documentation/Books/Manual/Installation/README.md +++ /dev/null @@ -1,30 +0,0 @@ -Installation -============ - -To install ArangoDB, as first step, please download the package for your Operating -System from the official [Download](https://www.arangodb.com/download) page of the -ArangoDB web site. - -You can find packages for various operating systems, including _RPM_ and _Debian_ -packages. _dmg_ packages or _homebrew_ can be used on macOS; an _Installer_ or a -`zip` archive is available for Windows. - -{% hint 'info' %} -Starting from version 3.4.0, in addition to installation packages, a `tar.gz` archive -is available for the Linux and macOS platforms. -{% endhint %} - -Besides the official images which are provided for the most popular linux distributions -there are also a variety of unofficial images provided by the community. We are -tracking most of the community contributions (including new or updated images) in -our newsletter: https://www.arangodb.com/category/newsletter/ - -- [Linux](Linux.md) -- [macOS](MacOSX.md) -- [Windows](Windows.md) - -If you prefer to compile ArangoDB from source, please refer to the [Compiling](Compiling.md) -_Section_. - -For detailed information on how to deploy ArangoDB, once it has been installed, -please refer to the [Deployment](../Deployment/README.md) chapter. diff --git a/Documentation/Books/Manual/Installation/Windows.md b/Documentation/Books/Manual/Installation/Windows.md deleted file mode 100644 index fe24bab27756..000000000000 --- a/Documentation/Books/Manual/Installation/Windows.md +++ /dev/null @@ -1,183 +0,0 @@ -Installing ArangoDB on Windows -============================== - -Introduction ------------- - -There are two possible methods to install ArangoDB on Windows: - -1. Automated, using an _NSIS_ Installer. -1. Manual, using a ZIP archive (XCopy installation). - -Both installation methods have their own pros and cons. - -{% hint 'info' %} -Please note that ArangoDB will only work on 64bit systems. -{% endhint %} - -Installing using the Installer ------------------------------- - -The default installation directory is *C:\Program Files\ArangoDB-3.x.x*. During the -installation process you may change this. 
In the following description we will assume -that ArangoDB has been installed in the location *<ROOTDIR>*. - -You have to be careful when choosing an installation directory. You need either -write permission to this directory or you need to modify the configuration file -for the server process. In the latter case the database directory and the Foxx -directory have to be writable by the user. - -### Single- and Multiuser Installation - -There are two main modes for the installer of ArangoDB. -The installer lets you select: - -- multi user installation (default; admin privileges required) - Will install ArangoDB as service. -- single user installation - Allow to install Arangodb as normal user. - Requires manual starting of the database server. - -### Installation Options - -The checkboxes allow you to chose weather you want to: - -- chose custom install paths -- do an automatic upgrade -- keep an backup of your data -- add executables to path -- create a desktop icon - -or not. - -#### Custom Install Paths - -This checkbox controls if you will be able to override -the default paths for the installation in subsequent steps. - -The default installation paths are: - -Multi User Default: -- Installation: `%PROGRAMFILES%\ArangoDB-3.x.x` -- DataBase: `%PROGRAMDATA%\ArangoDB` -- Foxx Service: `%PROGRAMDATA%\ArangoDB-apps` - -Single User Default: -- Installation: `%LOCALAPPDATA%\ArangoDB-3.x.x\` -- DataBase: `%LOCALAPPDATA%\ArangoDB\` -- Foxx Service: `%LOCALAPPDATA%\ArangoDB-apps\` - -The environment variables are typically: -- `%PROGRAMFILES%`: `C:\Program Files` -- `%PROGRAMDATA%`: `C:\ProgramData` -- `%LOCALAPPDATA%`: `C:\Users\\AppData\Local` - -We are not using the roaming part of the user's profile, because doing so -avoids the data being synced to the windows domain controller. - -#### Automatic Upgrade - -If this checkbox is selected the installer will attempt to perform an automatic -update. For more information please see -[Upgrading on Windows](../Upgrading/OSSpecificInfo/Windows.md). - -#### Keep Backup - -Select this to create a backup of your database directory during automatic upgrade. -The backup will be created next to your current database directory suffixed by -a time stamp. - -#### Add to Path - -Select this to add the binary directory to your system's path (multi user -installation) or user's path (single user installation). - -#### Desktop Icon - -Select if you want the installer to create Desktop Icons that let you: - -- access the web inteface -- start the commandline client (arangosh) -- start the database server (single user installation only) - -### Starting - -If you installed ArangoDB for multiple users (as a service) it is automatically -started. Otherwise you need to use the link that was created on you Desktop if -you chose to let the installer create desktop icons or - -the executable *arangod.exe* located in -*<ROOTDIR>\bin*. This will use the configuration file *arangod.conf* -located in *<ROOTDIR>\etc\arangodb*, which you can adjust to your needs -and use the data directory *<ROOTDIR>\var\lib\arangodb*. This is the place -where all your data (databases and collections) will be stored by default. - -Please check the output of the *arangod.exe* executable before going on. If the -server started successfully, you should see a line `ArangoDB is ready for -business. Have fun!` at the end of its output. - -We now wish to check that the installation is working correctly and to do this -we will be using the administration web interface. 
Execute *arangod.exe* if you -have not already done so, then open up your web browser and point it to the -page: - -``` -http://127.0.0.1:8529/ -``` - -### Advanced Starting - -If you want to provide our own start scripts, you can set the environment -variable *ARANGODB_CONFIG_PATH*. This variable should point to a directory -containing the configuration files. - -### Using the Client - -To connect to an already running ArangoDB server instance, there is a shell -*arangosh.exe* located in *<ROOTDIR>\bin*. This starts a shell which can be -used – amongst other things – to administer and query a local or remote -ArangoDB server. - -Note that *arangosh.exe* does NOT start a separate server, it only starts the -shell. To use it you must have a server running somewhere, e.g. by using -the *arangod.exe* executable. - -*arangosh.exe* uses configuration from the file *arangosh.conf* located in -*<ROOTDIR>\etc\arangodb\*. Please adjust this to your needs if you want to -use different connection settings etc. - -### Uninstalling - -To uninstall the Arango server application you can use the windows control panel -(as you would normally uninstall an application). Note however, that any data -files created by the Arango server will remain as well as the *<ROOTDIR>* -directory. To complete the uninstallation process, remove the data files and -the *<ROOTDIR>* directory manually. - -Installing using the ZIP archive (XCopy installation) ------------------------------------------------------ - -Not all users prefer the guided _Installer_ to install ArangoDB. In order to have a -[portable application](http://en.wikipedia.org/wiki/Portable_application), or easily -start different ArangoDB versions on the same machine, and/or for the maximum flexibility, -you might want to install using the _ZIP_ archive ([XCOPY deployment](http://en.wikipedia.org/wiki/XCOPY_deployment)). - -### Unzip the archive - -Open an explorer, choose a place where you would like ArangoDB to be, and extract the -archive there. It will create its own top-level directory with the version number in the name. - -### Edit the configuration - -*This step is optional.* - -If the default configuration of ArangoDB does not suite your needs, -you can edit `etc\arangodb3\arangod.conf` to change or add configuration options. - -### Start the Server - -After installation, you may start ArangoDB in several ways. The exact start-up command -depends on the type of ArangoDB deployment you are interested in -(_Single Instance_, _Master-Slave_, _Active Failover_ or _Cluster_). - -Please refer to the [_Deployment_](../Deployment/README.md) chapter for details. diff --git a/Documentation/Books/Manual/Monitoring/DC2DC/README.md b/Documentation/Books/Manual/Monitoring/DC2DC/README.md deleted file mode 100644 index 73bce37bc37c..000000000000 --- a/Documentation/Books/Manual/Monitoring/DC2DC/README.md +++ /dev/null @@ -1,91 +0,0 @@ - -# Monitoring datacenter to datacenter replication - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -This section includes information related to the monitoring of the _datacenter -to datacenter replication_. - -For a general introduction to the _datacenter to datacenter replication_, please -refer to the [Datacenter to datacenter replication](../../Architecture/DeploymentModes/DC2DC/README.md) -chapter. 
- -## Metrics - -_ArangoSync_ (_master_ & _worker_) provide metrics that can be used for monitoring -the _datacenter to datacenter replication_ solution. These metrics are available -using the following HTTPS endpoints: - -- GET `/metrics`: Provides metrics in a format supported by Prometheus. -- GET `/metrics.json`: Provides the same metrics in JSON format. - -Both endpoints include help information per metrics. - -Note: Both endpoints require authentication. Besides the usual authentication methods -these endpoints are also accessible using a special bearer token specified using the `--monitoring.token` -command line option. - -The Prometheus output (`/metrics`) looks like this: - -```text -... -# HELP arangosync_master_worker_registrations Total number of registrations -# TYPE arangosync_master_worker_registrations counter -arangosync_master_worker_registrations 2 -# HELP arangosync_master_worker_storage Number of times worker info is stored, loaded -# TYPE arangosync_master_worker_storage counter -arangosync_master_worker_storage{kind="",op="save",result="success"} 20 -arangosync_master_worker_storage{kind="empty",op="load",result="success"} 1 -... -``` - -The JSON output (`/metrics.json`) looks like this: - -```json -{ - ... - "arangosync_master_worker_registrations": { - "help": "Total number of registrations", - "type": "counter", - "samples": [ - { - "value": 2 - } - ] - }, - "arangosync_master_worker_storage": { - "help": "Number of times worker info is stored, loaded", - "type": "counter", - "samples": [ - { - "value": 8, - "labels": { - "kind": "", - "op": "save", - "result": "success" - } - }, - { - "value": 1, - "labels": { - "kind": "empty", - "op": "load", - "result": "success" - } - } - ] - } - ... -} -``` - -Hint: To get a list of a metrics and their help information, run: - -```bash -alias jq='docker run --rm -i realguess/jq jq' -curl -sk -u ":" https://:8629/metrics.json | \ - jq 'with_entries({key: .key, value:.value.help})' -``` diff --git a/Documentation/Books/Manual/Monitoring/LogLevels.md b/Documentation/Books/Manual/Monitoring/LogLevels.md deleted file mode 100644 index 0b91b8e280b7..000000000000 --- a/Documentation/Books/Manual/Monitoring/LogLevels.md +++ /dev/null @@ -1,150 +0,0 @@ -Log Levels -========== - -In this section we define and describe the meaning of the log levels -in ArangoDB's log messages. The log levels are, from most to least severe: - - - FATAL - - ERROR - - WARN - - INFO - - DEBUG - - TRACE - -For each log topic one can configure the lowest level which is actually logged. -For example, if one sets the log level to `ERROR` for some log topic, -one only sees messages of level `ERROR` and above (`ERROR` and `FATAL`). - -See an example of how to -[configure log levels](../Administration/Configuration/README.md#options-with-multiple-values) -in the Administration chapter. - -FATAL ------ - -_Fatal_ errors are the most severe errors and only occur if a service or application -can not recover safely from an abnormal state, which forces it to shut down. - -Typically, a fatal error only occurs once in the process lifetime, -so if the log file is tied to the process, this is typically -the last message in the log. There might be a few exceptions to this -rule, where it makes more sense to keep the server running, for example -to be able to diagnose the problem better. 
- -We reserve this error type for the following events: - -- crucial files/folders are missing or inaccessible during startup -- overall application or system failure with a serious danger of - data corruption or loss (the following shutdown is intended to prevent - possible or further data loss) - -**Recommendation**: -Fatal errors should be investigated immediately by a system administrator. - -ERROR ------ - - -If a problem is encountered which is fatal to some operation, but not for -the service or the application as a whole, then an _error_ is logged. - -Reasons for log entries of this severity are for example: - -- missing data -- a required file can't be opened -- incorrect connection strings -- missing services - -If some operation is automatically retried and eventually succeeds, -no error will be written to the log. Therefore, if an error is logged then -it should be taken seriously as it may require user intervention to solve. - -Note that in any distributed system, temporary failures of network connections -or certain servers or services can and will happen. Most systems will tolerate -such failures and retry for some time, but will eventually run out of patience, -give up and fail the operation one level up. - -**Recommendation**: -A system administrator should be notified automatically to investigate the error. -By filtering the log to look at errors (and other logged events above) -one can determine the error frequency and quickly identify the initial failure -that might have resulted in a cascade of additional errors. - -WARN ----- - -A _warning_ is triggered by anything that can potentially cause -application oddities, but from which the system recovers automatically. - -Examples of events which lead to warnings: - -- switching from a primary to backup server -- retrying an operation -- missing secondary data -- things running inefficiently - (in particular slow queries and bad system settings) - -Certain warnings are logged at startup time only, such as startup option -values which lie outside the recommended range. - -These _might_ be problems, or might not. For example, expected transient -environmental conditions such as short loss of network or database -connectivity are logged as warnings, not errors. Viewing a log filtered -to show only warnings and errors may give quick insight into early -hints at the root cause of subsequent errors. - -**Recommendation**: -Can mostly be ignored but can give hints for inefficiencies or -future problems. - -INFO ----- - -_Info_ messages are generally useful information to log to better -understand what state the system is in. One will usually want to -have info messages available but does usually not care about them -under normal circumstances. - -Informative messages are logged in events like: - -- successful initialization -- services starting or stopping -- successful completion of significant transactions -- configuration assumptions - -Viewing log entries of severity _info_ and above should give a quick overview -of major state changes in the process providing top-level context for -understanding any warnings or errors that also occur. Logging info level -messages and above will usually not spam anything beyond good readability. - -**Recommendation**: -Usually good to have, but one does not have to look at _info_ level messages -under normal circumstances. 
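As described at the top of this page, the lowest level that is actually logged can be configured per log topic. For example, the general level could stay at `info` while a single topic is raised to `debug` during an investigation. A sketch of the corresponding startup options (`queries` is just an example topic; see the linked Administration chapter for the full option syntax):

```
arangod --log.level info --log.level queries=debug
```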
- -DEBUG ------ - -Information which is helpful to ArangoDB developers as well as to other -people like system administrators to diagnose an application or service -is logged as _debug_ message. - -Debug messages make software much more maintainable but require some -diligence because the value of individual debug statements may change -over time as programs evolve. The best way to achieve this is by getting -the development team in the habit of regularly reviewing logs as a standard -part of troubleshooting reported issues. We encourage our teams to -prune out messages that no longer provide useful context and to add -messages where needed to understand the context of subsequent messages. - -**Recommendation**: -_Debug_ level messages are usually switched off, but one can switch them on -to investigate problems. - -TRACE ------ - -_Trace_ messages produce a lot of output and are usually only needed by -ArangoDB developers to debug problems in the source code. - -**Recommendation**: -_Trace_ level logging should generally stay disabled. diff --git a/Documentation/Books/Manual/Monitoring/README.md b/Documentation/Books/Manual/Monitoring/README.md deleted file mode 100644 index f256f8657633..000000000000 --- a/Documentation/Books/Manual/Monitoring/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Monitoring - -- [Log Levels](LogLevels.md) -- [Datacenter to datacenter replication](DC2DC/README.md) diff --git a/Documentation/Books/Manual/Programs/Arango-dfdb/Examples.md b/Documentation/Books/Manual/Programs/Arango-dfdb/Examples.md deleted file mode 100644 index 983eb606743e..000000000000 --- a/Documentation/Books/Manual/Programs/Arango-dfdb/Examples.md +++ /dev/null @@ -1,75 +0,0 @@ -Arango-dfdb Examples -==================== - -ArangoDB uses append-only journals. Data corruption should only occur when the -database server is killed. In this case, the corruption should only occur in the -last object(s) that have being written to the journal. - -If a corruption occurs within a normal datafile, then this can only happen if a -hardware fault occurred. - -If a journal or datafile is corrupt, shut down the database server and start -the program - - arango-dfdb - -in order to check the consistency of the datafiles and journals. This brings up - - ___ _ __ _ _ ___ ___ ___ - / \__ _| |_ __ _ / _(_) | ___ / \/ __\ / _ \ - / /\ / _` | __/ _` | |_| | |/ _ \ / /\ /__\// / /_\/ - / /_// (_| | || (_| | _| | | __/ / /_// \/ \/ /_\\ - /___,' \__,_|\__\__,_|_| |_|_|\___| /___,'\_____/\____/ - - Available collections: - 0: _structures - 1: _users - 2: _routing - 3: _modules - 4: _graphs - 5: products - 6: prices - *: all - - Collection to check: - -You can now select which database and collection you want to check. After you selected -one or all of the collections, a consistency check will be performed. - - Checking collection #1: _users - - Database - path: /usr/local/var/lib/arangodb - - Collection - name: _users - identifier: 82343 - - Datafiles - # of journals: 1 - # of compactors: 1 - # of datafiles: 0 - - Datafile - path: /usr/local/var/lib/arangodb/collection-82343/journal-1065383.db - type: journal - current size: 33554432 - maximal size: 33554432 - total used: 256 - # of entries: 3 - status: OK - -If there is a problem with one of the datafiles, then the database debugger will print it -and prompt for whether to attempt to fix it. - - WARNING: The journal was not closed properly, the last entries are corrupted. - This might happen ArangoDB was killed and the last entries were not - fully written to disk. 
- - Wipe the last entries (Y/N)? - -If you answer **Y**, the corrupted entry will be removed. - -If you see a corruption in a datafile (and not a journal), then something is -terribly wrong. These files are immutable and never changed by ArangoDB. A -corruption in such file is an indication of a hard-disk failure. diff --git a/Documentation/Books/Manual/Programs/Arango-dfdb/README.md b/Documentation/Books/Manual/Programs/Arango-dfdb/README.md deleted file mode 100644 index 2db4ed149326..000000000000 --- a/Documentation/Books/Manual/Programs/Arango-dfdb/README.md +++ /dev/null @@ -1,17 +0,0 @@ -Arango-dfdb -=========== - -{% hint 'danger' %} -The tool is to be used with caution, under guidance of ArangoDB support! -{% endhint %} - -The ArangoDB Datafile Debugger can check datafiles for corruptions -and remove invalid entries to repair them. Such corruptions should -not occur unless there was a hardware failure. - -The options are the same as for [arangod](../Arangod/Options.md). - -{% hint 'info' %} -`arango-dfdb` works with the -[MMFiles storage engine](../../Architecture/StorageEngines.md) only. -{% endhint %} diff --git a/Documentation/Books/Manual/Programs/Arangobench/Examples.md b/Documentation/Books/Manual/Programs/Arangobench/Examples.md deleted file mode 100644 index 14e56ac19bb9..000000000000 --- a/Documentation/Books/Manual/Programs/Arangobench/Examples.md +++ /dev/null @@ -1,24 +0,0 @@ -Arangobench Examples -==================== - -Start Arangobench with the default user and server endpoint: - - arangobench - -Run the 'version' test case with 1000 requests, without concurrency: - - --test-case version --requests 1000 --concurrency 1 - -Run the 'document' test case with 2000 requests, with two concurrent threads: - - --test-case document --requests 1000 --concurrency 2 - -Run the 'document' test case with 2000 requests, with concurrency 2, -with async requests: - - --test-case document --requests 1000 --concurrency 2 --async true - -Run the 'document' test case with 2000 requests, with concurrency 2, -using batch requests: - - --test-case document --requests 1000 --concurrency 2 --batch-size 10 diff --git a/Documentation/Books/Manual/Programs/Arangobench/Options.md b/Documentation/Books/Manual/Programs/Arangobench/Options.md deleted file mode 100644 index 1ea340d55bc0..000000000000 --- a/Documentation/Books/Manual/Programs/Arangobench/Options.md +++ /dev/null @@ -1,34 +0,0 @@ -Arangobench Startup Options -=========================== - -Usage: `arangobench []` - -@startDocuBlock program_options_arangobench - -Notes ------ - -### Test cases - -Value | Description ---------------------|------------------------- -`aqlinsert` | Insert documents via AQL -`aqltrx` | AQL Transactions with deep nested AQL `FOR` - loops -`aqlv8` | Execute AQL with V8 functions to insert random documents -`collection` | Creates collections -`counttrx` | Uses JS transactions to count the documents and insert the result again -`crud` | Create/Read/Update/Delete -`crud-append` | Create/Read/Update/Read again -`crud-write-read` | Create/Read Documents -`document` | Creates documents -`edge` | Create/Read/Update edge documents -`hash` | Create/Read/Update/Read documents indexed by a hash index -`import-document` | Creates documents via the import API -`multi-collection` | Multiple transactions combining reads & writes from js on multiple collections -`multitrx` | Multiple transactions combining reads & writes from js -`random-shapes` | Create/Read/Delete heterogeneous documents with random values -`shapes` | 
Create & Delete documents with heterogeneous attribute names -`shapes-append` | Create documents with heterogeneous attribute names -`skiplist` | Create/Read/Update/Read documents indexed by a skiplist -`stream-cursor` | Create documents and retrieve them in a streaming fashion -`version` | Requests /_api/version diff --git a/Documentation/Books/Manual/Programs/Arangobench/README.md b/Documentation/Books/Manual/Programs/Arangobench/README.md deleted file mode 100644 index b11a5fe3be09..000000000000 --- a/Documentation/Books/Manual/Programs/Arangobench/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Arangobench -=========== - -_Arangobench_ is ArangoDB's benchmark and test tool. It can be used to issue test -requests to the database for performance and server function testing. -It supports parallel querying and batch requests. - -Related blog posts: - -- [Measuring ArangoDB insert performance](https://www.arangodb.com/2013/11/measuring-arangodb-insert-performance/) -- [Gain factor of 5 using batch requests](https://www.arangodb.com/2012/10/gain-factor-of-5-using-batch-updates/) diff --git a/Documentation/Books/Manual/Programs/Arangod/Agency.md b/Documentation/Books/Manual/Programs/Arangod/Agency.md deleted file mode 100644 index 4a84c9525d51..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Agency.md +++ /dev/null @@ -1,61 +0,0 @@ -# ArangoDB Server Agency Options - -## Activate - -`agency.activate` - -Activate agency. - -## Compaction - -`agency.compaction-keep-size` - -Keep as many indices before compaction point. - -## Election - -`agency.election-timeout-max` - -Maximum timeout before an agent calls for new election in seconds. - -`agency.election-timeout-min` - -Minimum timeout before an agent calls for new election in seconds. - -## Endpoint - -`agency.endpoint` - -Agency endpoints. - -## My address - -`agency.my-address` - -Which address to advertise to the outside. - -## Pool size - -`agency.pool-size` - -Number of agent pool. - -## Size - -`agency.size` - -Number of agents. - -## Supervision - -`agency.supervision` - -Perform ArangoDB cluster supervision. - -`agency.supervision-frequency` - -ArangoDB cluster supervision frequency in seconds. - -`agency.supervision-grace-period` - -Supervision time, after which a server is considered to have failed in seconds. diff --git a/Documentation/Books/Manual/Programs/Arangod/Arangosearch.md b/Documentation/Books/Manual/Programs/Arangod/Arangosearch.md deleted file mode 100644 index 9e21e1014dd1..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Arangosearch.md +++ /dev/null @@ -1,12 +0,0 @@ -# ArangoDB Server ArangoSearch Options - -## Threads - -`arangosearch.threads` - -The exact number of threads to use for asynchronous tasks (0 == autodetect). - -`arangosearch.threads-limit` - -Upper limit to the auto-detected number of threads to use for asynchronous -tasks (0 == use default). diff --git a/Documentation/Books/Manual/Programs/Arangod/Audit.md b/Documentation/Books/Manual/Programs/Arangod/Audit.md deleted file mode 100644 index 06f4b3f5bfa7..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Audit.md +++ /dev/null @@ -1,13 +0,0 @@ -# ArangoDB Server Audit Options - -## Hostname - -`--audit.hostname` - -Hostname to use. - -## Output - -`--audit.output` - -Audit destination(s). 
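The audit options above are only named; as a rough sketch of how they might be combined (assuming the output accepts a `file://` destination analogous to `--log.output`, and with an illustrative hostname and path), an Enterprise Edition server could be started with:

```
arangod \
  --audit.hostname audit.example.com \
  --audit.output file:///var/log/arangodb3/audit.log
```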
diff --git a/Documentation/Books/Manual/Programs/Arangod/Cache.md b/Documentation/Books/Manual/Programs/Arangod/Cache.md deleted file mode 100644 index 9667e61a8d73..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Cache.md +++ /dev/null @@ -1,23 +0,0 @@ -# ArangoDB Server Cache Options - -Since ArangoDB 3.2, the several core components of the server use a cache -system which pools memory across many different cache tables. In order to -provide intelligent internal memory management, the system periodically -reclaims memory from caches which are used less often and reallocates it to -caches which get more activity. - -## Rebalancing interval - -Time between cache rebalancing attempts: `--cache.rebalancing-interval` - -The value is specified in microseconds with a default of 2 seconds and a -minimum of 500 milliseconds. - -## Cache size - -Global size limit for all hash caches: `--cache.size` - -The global caching system, all caches, and all the data contained therein will -fit inside this limit. The size is specified in bytes. If there is less than -4GiB of RAM on the system, the default value is 256MiB. If there is more, -the default is `(system RAM size - 2GiB) * 0.25`. diff --git a/Documentation/Books/Manual/Programs/Arangod/Cluster.md b/Documentation/Books/Manual/Programs/Arangod/Cluster.md deleted file mode 100644 index a5ad84d14abd..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Cluster.md +++ /dev/null @@ -1,127 +0,0 @@ -# ArangoDB Server Clusters Options - -## Agency endpoint - - - -List of agency endpoints: -`--cluster.agency-endpoint endpoint` - -An agency endpoint the server can connect to. The option can be specified -multiple times, so the server can use a cluster of agency servers. -Endpoints have the following pattern: - -- tcp://ipv4-address:port - TCP/IP endpoint, using IPv4 -- tcp://[ipv6-address]:port - TCP/IP endpoint, using IPv6 -- ssl://ipv4-address:port - TCP/IP endpoint, using IPv4, SSL encryption -- ssl://[ipv6-address]:port - TCP/IP endpoint, using IPv6, SSL encryption - -At least one endpoint must be specified or ArangoDB will refuse to start. -It is recommended to specify at least two endpoints so ArangoDB has an -alternative endpoint if one of them becomes unavailable. - -**Examples** - -``` ---cluster.agency-endpoint tcp://192.168.1.1:4001 --cluster.agency-endpoint tcp://192.168.1.2:4002 ... -``` - -## My address - - - -This server's address / endpoint: -`--cluster.my-address endpoint` - -The server's endpoint for cluster-internal communication. If specified, it -must have the following pattern: -- tcp://ipv4-address:port - TCP/IP endpoint, using IPv4 -- tcp://[ipv6-address]:port - TCP/IP endpoint, using IPv6 -- ssl://ipv4-address:port - TCP/IP endpoint, using IPv4, SSL encryption -- ssl://[ipv6-address]:port - TCP/IP endpoint, using IPv6, SSL encryption - -If no *endpoint* is specified, the server will look up its internal -endpoint address in the agency. If no endpoint can be found in the agency -for the server's id, ArangoDB will refuse to start. - -**Examples** - -Listen only on interface with address `192.168.1.1`: - -``` ---cluster.my-address tcp://192.168.1.1:8530 -``` - -Listen on all ipv4 and ipv6 addresses, which are configured on port `8530`: - -``` ---cluster.my-address ssl://[::]:8530 -``` - -## My advertised endpoint - - - -this server's advertised endpoint (e.g. external IP address or load balancer, optional) -`--cluster.my-advertised-endpoint` - -This servers's endpoint for external communication. 
If specified, it -must have the following pattern: -- tcp://ipv4-address:port - TCP/IP endpoint, using IPv4 -- tcp://[ipv6-address]:port - TCP/IP endpoint, using IPv6 -- ssl://ipv4-address:port - TCP/IP endpoint, using IPv4, SSL encryption -- ssl://[ipv6-address]:port - TCP/IP endpoint, using IPv6, SSL encryption - -If no *advertised endpoint* is specified, no external endpoint will be advertised. - -**Examples** - -If an external interface is available to this server, it can be -specified to communicate with external software / drivers: - -``` ---cluster.my-advertised-enpoint tcp://some.public.place:8530 -``` - -All specifications of endpoints apply. - - -## My role - - - -This server's role: -`--cluster.my-role [dbserver|coordinator]` - -The server's role. Is this instance a db server (backend data server) -or a coordinator (frontend server for external and application access) - -## Require existing ID - -Require an existing server id: `--cluster.require-persisted-id bool` - -If set to true, then the instance will only start if a UUID file is found -in the database on startup. Setting this option will make sure the instance -is started using an already existing database directory from a previous -start, and not a new one. For the first start, the UUID file must either be -created manually in the database directory, or the option must be set to -false for the initial startup and only turned on for restarts. - -## More advanced options - -{% hint 'warning' %} -These options should generally remain untouched. -{% endhint %} - - - -Synchronous replication timing: -`--cluster.synchronous-replication-timeout-factor double` - -Stretch or clinch timeouts for internal synchronous replication -mechanism between db servers. All such timeouts are affected by this -change. Please change only with intent and great care. Default at `1.0`. - -System replication factor: `--cluster.system-replication-factor integer` - -Change default replication factor for system collections. Default at `2`. diff --git a/Documentation/Books/Manual/Programs/Arangod/Compaction.md b/Documentation/Books/Manual/Programs/Arangod/Compaction.md deleted file mode 100644 index 75501bc4295f..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Compaction.md +++ /dev/null @@ -1,138 +0,0 @@ -# ArangoDB Server Compaction Options (MMFiles) - -The ArangoDB MMFiles storage engine will run a compaction over data files. - -ArangoDB writes Documents in the WAL file. Once they have been sealed in the -WAL file, the collector may copy them into a per collection journal file. - -Once journal files fill up, they're sealed to become data files. - -One collection may have documents in the WAL logs, its journal file, and an -arbitrary number of data files. - -If a collection is loaded, each of these files are opened (thus use a file -handle) and are mmap'ed. Since file handles and memory mapped files are also -a sparse resource, that number should be kept low. - -Once you update or remove documents from data files (or already did while it was -the journal file) these documents are marked as 'dead' with a deletion marker. - -Over time the number of dead documents may rise, and we don't want to use the -previously mentioned resources, plus the disk space should be given back to -the system. Thus several journal files can be combined to one, omitting the -dead documents. - -Combining several of these data files into one is called compaction. 
-The compaction process reads the alive documents from the original data files
-and writes them into a new data file.
-
-Once that is done, the memory mappings to the old data files are released, and
-the files are erased.
-
-Since the compaction locks the collection and also uses I/O resources, it is
-carefully configurable under which conditions the system performs how much of
-this compaction work:
-
-ArangoDB spawns one compactor thread per database. The settings below vary
-in scope.
-
-## Activity control
-
-The activity control parameters alter the behavior in terms of scan / execution
-frequency of the compaction.
-
-Sleep interval between two compaction runs (in seconds):
-`--compaction.db-sleep-time`
-
-The number of seconds the collector thread will wait between two attempts to
-search for compactable data files of collections in one database.
-If the compactor has actually executed work, a subsequent lookup is done.
-
-Scope: Database.
-
-Minimum sleep time between two compaction runs (in seconds):
-`--compaction.min-interval`
-
-When an actual compaction was executed for one collection, we wait for this
-time before we execute the compaction on this collection again.
-This gives any user load that may have piled up a chance to be worked off.
-
-Scope: collection.
-
-## Source data files
-
-These parameters control which data files are taken into account for a
-compaction run. You can specify several criteria, each of which may be
-sufficient on its own.
-
-The scan over the data files belonging to one collection is executed from
-oldest data file to newest; if files qualify for a compaction they may be
-merged with newer files (containing younger documents).
-
-Scope: Collection level, some are influenced by collection settings.
-
-Minimal file size threshold original data files have to be below for
-a compaction:
-`--compaction.min-small-data-file-size`
-
-This threshold controls the minimum total size below which a data file
-will always be taken into account for the compaction.
-
-Minimum unused count of documents in a datafile:
-`--compaction.dead-documents-threshold`
-
-Data files will often contain dead documents. This parameter specifies the
-maximum acceptable number of dead documents; once it is exceeded, the data
-file qualifies for compaction.
-
-How many bytes of the source data file are allowed to be unused at most:
-`--compaction.dead-size-threshold`
-
-The dead data size varies along with the size of your documents.
-If you have many big documents, this threshold may be hit before the document
-count threshold.
-
-What percentage of the source data file should at least be unused:
-`--compaction.dead-size-percent-threshold`
-
-Since the size of the documents may vary, this threshold works on the
-percentage of the dead documents' size. Thus, if you have many huge
-dead documents, this threshold kicks in earlier.
-
-To give an example with numbers: if the data file contains 800 kbytes of alive
-and 400 kbytes of dead documents, the share of the dead documents is
-
-`400 / (400 + 800) = 33 %`.
-
-If this value is higher than the specified threshold, the data file will
-be compacted.
-
-## Compacted target files
-
-Once data files of a collection are qualified for a compaction run, these
-parameters control how many data files are merged into one (a single source
-data file may even be compacted into one smaller target data file).
-
-Scope: Collection level, some are influenced by collection settings.
- -Maximum number of files to merge to one file: -`--compaction.dest-max-files` - -How many data files (at most) we may merge into one resulting data file during -one compaction run. - -How large the resulting file may be in comparison to the collections `database.maximal-journal-size` setting: -`--compaction.dest-max-file-size-factor` - -In ArangoDB you can configure a default *journal file size* globally and -override it on a per collection level. This value controls the size of -collected data files relative to the configured journal file size of the -collection in question. - -A factor of 3 means that the maximum file size of the compacted file is -3 times the size of the maximum collection journal file size. - -How large may the compaction result file become: -`--compaction.dest-max-result-file-size` - -Next to the factor above, a totally maximum allowed file size in bytes may -be specified. This will overrule all previous parameters. diff --git a/Documentation/Books/Manual/Programs/Arangod/Database.md b/Documentation/Books/Manual/Programs/Arangod/Database.md deleted file mode 100644 index 50388578fb14..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Database.md +++ /dev/null @@ -1,116 +0,0 @@ -# ArangoDB Server Database Options - -## Auto upgrade - -`--database.auto-upgrade` - -Specifying this option will make the server perform a database upgrade instead -of starting the server normally. A database upgrade will first compare the -version number stored in the file VERSION in the database directory with the -current server version. - -If the version number found in the database directory is higher than the version -number the server is running, the server expects this is an unintentional -downgrade and will warn about this. Using the server in these conditions is -neither recommended nor supported. - -If the version number found in the database directory is lower than the version -number the server is running, the server will check whether there are any -upgrade tasks to perform. It will then execute all required upgrade tasks and -print their statuses. If one of the upgrade tasks fails, the server will exit -with an error. Re-starting the server with the upgrade option will then again -trigger the upgrade check and execution until the problem is fixed. - -Whether or not this option is specified, the server will always perform a -version check on startup. Running the server with a non-matching version number -in the VERSION file will make the server refuse to start. - -## Directory - -`--database.directory directory` - -The directory containing the collections and datafiles. Defaults -to */var/lib/arango*. When specifying the database directory, please -make sure the directory is actually writable by the arangod process. - -You should further not use a database directory which is provided by a -network filesystem such as NFS. The reason is that networked filesystems -might cause inconsistencies when there are multiple parallel readers or -writers or they lack features required by arangod (e.g. flock()). - -`directory` - -When using the command line version, you can simply supply the database -directory as argument. - -**Examples** - -``` -> ./arangod --server.endpoint tcp://127.0.0.1:8529 --database.directory -/tmp/vocbase -``` - -## Database directory state precondition - -`--database.require-directory-state state` - -Using this option it is possible to require the database directory to be -in a specific state on startup. 
the options for this value are: - -- non-existing: database directory must not exist -- existing: database directory must exist -- empty: database directory must exist but be empty -- populated: database directory must exist and contain specific files already -- any: any directory state allowed - -## Force syncing of properties - -@startDocuBlock databaseForceSyncProperties - -## Maximal Journal size (MMFiles only) - -@startDocuBlock databaseMaximalJournalSize - -## Wait for sync - -@startDocuBlock databaseWaitForSync - -## More advanced options - -`--database.throw-collection-not-loaded-error flag` - -Accessing a not-yet loaded collection will automatically load a collection on -first access. This flag controls what happens in case an operation would need to -wait for another thread to finalize loading a collection. If set to *true*, then -the first operation that accesses an unloaded collection will load it. Further -threads that try to access the same collection while it is still loading will -get an error (1238, *collection not loaded*). When the initial operation has -completed loading the collection, all operations on the collection can be -carried out normally, and error 1238 will not be thrown. - -If set to *false*, the first thread that accesses a not-yet loaded collection -will still load it. Other threads that try to access the collection while -loading will not fail with error 1238 but instead block until the collection is -fully loaded. This configuration might lead to all server threads being blocked -because they are all waiting for the same collection to complete -loading. Setting the option to *true* will prevent this from happening, but -requires clients to catch error 1238 and react on it (maybe by scheduling a -retry for later). - -The default value is *false*. - -`--database.replication-applier flag` - -Enable/disable replication applier. - -If *false* the server will start with replication appliers turned off, -even if the replication appliers are configured with the *autoStart* option. -Using the command-line option will not change the value of the *autoStart* -option in the applier configuration, but will suppress auto-starting the -replication applier just once. - -If the option is not used, ArangoDB will read the applier configuration -from the file *REPLICATION-APPLIER-CONFIG* on startup, and use the value of -the *autoStart* attribute from this file. - -The default is *true*. diff --git a/Documentation/Books/Manual/Programs/Arangod/Foxx.md b/Documentation/Books/Manual/Programs/Arangod/Foxx.md deleted file mode 100644 index 87d27a4b86b2..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Foxx.md +++ /dev/null @@ -1,9 +0,0 @@ -# ArangoDB Server Foxx Options - -## Foxx queues - -@startDocuBlock foxxQueues - -## Foxx queues poll interval - -@startDocuBlock foxxQueuesPollInterval diff --git a/Documentation/Books/Manual/Programs/Arangod/Frontend.md b/Documentation/Books/Manual/Programs/Arangod/Frontend.md deleted file mode 100644 index 36501d83cd18..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Frontend.md +++ /dev/null @@ -1,12 +0,0 @@ -# ArangoDB Server Frontend Options - -## Proxy settings - -`frontend.proxy-request-check` - -Enable proxy request checking. - -`frontend.trusted-proxy` - -List of proxies to trust (may be IP or network). -Make sure `frontend.proxy-request-check` is enabled. 
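For the two proxy options just listed, a minimal configuration-file sketch (the addresses are placeholders; the `[frontend]` section form mirrors the `[log]` configuration example shown for the log options):

```
[frontend]
proxy-request-check = true
trusted-proxy = 10.0.0.10
trusted-proxy = 192.168.1.0/24
```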
diff --git a/Documentation/Books/Manual/Programs/Arangod/Global.md b/Documentation/Books/Manual/Programs/Arangod/Global.md deleted file mode 100644 index 0240d6500dc3..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Global.md +++ /dev/null @@ -1,131 +0,0 @@ -# ArangoDB Server Global Options - -## General help - -`--help` - -`-h` - -Prints a list of the most common options available and then exits. In order to -see all options use *--help-all*. - -To receive the startup options in JSON format, pass the `--dump-options` flag. This will -print out all options and exit. - -## Version - -`--version` - -`-v` - -Prints the version of the server and exits. - -## Daemon - -`--daemon` - -Runs the server as a daemon (as a background process). This parameter can only -be set if the pid (process id) file is specified. That is, unless a value to the -parameter pid-file is given, then the server will report an error and exit. - -## Default Language - -`--default-language default-language` - -The default language ist used for sorting and comparing strings. The language -value is a two-letter language code (ISO-639) or it is composed by a two-letter -language code with and a two letter country code (ISO-3166). Valid languages are -"de", "en", "en_US" or "en_UK". - -The default default-language is set to be the system locale on that platform. - -## Supervisor - -`--supervisor` - -Executes the server in supervisor mode. In the event that the server -unexpectedly terminates due to an internal error, the supervisor will -automatically restart the server. Setting this flag automatically implies that -the server will run as a daemon. Note that, as with the daemon flag, this flag -requires that the pid-file parameter will set. - -```js -unix> ./arangod --supervisor --pid-file /var/run/arangodb.pid /tmp/vocbase/ -2012-06-27T15:58:28Z [10133] INFO starting up in supervisor mode -``` - -As can be seen (e.g. by executing the ps command), this will start a supervisor -process and the actual database process: - -```js -unix> ps fax | grep arangod -10137 ? Ssl 0:00 ./arangod --supervisor --pid-file /var/run/arangodb.pid /tmp/vocbase/ -10142 ? Sl 0:00 \_ ./arangod --supervisor --pid-file /var/run/arangodb.pid /tmp/vocbase/ -``` - -When the database process terminates unexpectedly, the supervisor process will -start up a new database process: - -``` -> kill -SIGSEGV 10142 - -> ps fax | grep arangod -10137 ? Ssl 0:00 ./arangod --supervisor --pid-file /var/run/arangodb.pid /tmp/vocbase/ -10168 ? Sl 0:00 \_ ./arangod --supervisor --pid-file /var/run/arangodb.pid /tmp/vocbase/ -``` - -## User identity - -`--uid uid` - -The name (identity) of the user the server will run as. If this parameter is not -specified, the server will not attempt to change its UID, so that the UID used -by the server will be the same as the UID of the user who started the server. If -this parameter is specified, then the server will change its UID after opening -ports and reading configuration files, but before accepting connections or -opening other files (such as recovery files). This is useful when the server -must be started with raised privileges (in certain environments) but security -considerations require that these privileges be dropped once the server has -started work. - -Observe that this parameter cannot be used to bypass operating system -security. In general, this parameter (and its corresponding relative gid) can -lower privileges but not raise them. 
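As a sketch of the privilege dropping described for `--uid` (the account name, endpoint and data directory are illustrative only), a server started with elevated rights could be told to continue as a dedicated user once ports are open:

```
sudo arangod --uid arangodb --server.endpoint tcp://0.0.0.0:8529 /var/lib/arangodb3
```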
- - -## Group identity - -`--gid gid` - -The name (identity) of the group the server will run as. If this parameter is -not specified, then the server will not attempt to change its GID, so that the -GID the server runs as will be the primary group of the user who started the -server. If this parameter is specified, then the server will change its GID -after opening ports and reading configuration files, but before accepting -connections or opening other files (such as recovery files). - -This parameter is related to the parameter uid. - - -## Process identity - -`--pid-file filename` - -The name of the process ID file to use when running the server as a -daemon. This parameter must be specified if either the flag *daemon* or -*supervisor* is set. - -## Console - -`--console` - -Runs the server in an exclusive emergency console mode. When -starting the server with this option, the server is started with -an interactive JavaScript emergency console, with all networking -and HTTP interfaces of the server disabled. - -No requests can be made to the server in this mode, and the only -way to work with the server in this mode is by using the emergency -console. -Note that the server cannot be started in this mode if it is -already running in this or another mode. diff --git a/Documentation/Books/Manual/Programs/Arangod/Http.md b/Documentation/Books/Manual/Programs/Arangod/Http.md deleted file mode 100644 index 083c5666a40d..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Http.md +++ /dev/null @@ -1,45 +0,0 @@ -# ArangoDB Server HTTP Options - -## Keep-alive timeout - -`--http.keep-alive-timeout` - -Allows to specify the timeout for HTTP keep-alive connections. The timeout -value must be specified in seconds. -Idle keep-alive connections will be closed by the server automatically -when the timeout is reached. A keep-alive-timeout value 0 will disable the keep -alive feature entirely. - -## Hide Product header - -`--http.hide-product-header` - -If *true*, the server will exclude the HTTP header "Server: ArangoDB" in -HTTP responses. If set to *false*, the server will send the header in -responses. - -The default is *false*. - - -## Allow method override - -`--http.allow-method-override` - -When this option is set to *true*, the HTTP request method will optionally -be fetched from one of the following HTTP request headers if present in -the request: - -- *x-http-method* -- *x-http-method-override* -- *x-method-override* - -If the option is set to *true* and any of these headers is set, the -request method will be overridden by the value of the header. For example, -this allows issuing an HTTP DELETE request which to the outside world will -look like an HTTP GET request. This allows bypassing proxies and tools that -will only let certain request types pass. - -Setting this option to *true* may impose a security risk so it should only -be used in controlled environments. - -The default value for this option is *false*. 
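As a sketch of the override mechanism described above (collection name and key are made up for illustration), a client limited to GET requests could still trigger a DELETE once the option is enabled:

```
curl -X GET \
  -H "x-http-method-override: DELETE" \
  http://localhost:8529/_api/document/products/12345
```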
diff --git a/Documentation/Books/Manual/Programs/Arangod/Javascript.md b/Documentation/Books/Manual/Programs/Arangod/Javascript.md deleted file mode 100644 index 8ade5df28676..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Javascript.md +++ /dev/null @@ -1,128 +0,0 @@ -# ArangoDB Server JavaScript Options - -## JavaScript code execution - -`--javascript.allow-admin-execute` - -This option can be used to control whether user-defined JavaScript code -is allowed to be executed on server by sending via HTTP to the API endpoint -`/_admin/execute` with an authenticated user account. -The default value is *false*, which disables the execution of user-defined -code. This is also the recommended setting for production. In test environments, -it may be convenient to turn the option on in order to send arbitrary setup -or teardown commands for execution on the server. - -## V8 contexts - -`--javascript.v8-contexts number` - -Specifies the maximum *number* of V8 contexts that are created for executing -JavaScript code. More contexts allow executing more JavaScript actions in -parallel, provided that there are also enough threads available. Please note -that each V8 context will use a substantial amount of memory and requires -periodic CPU processing time for garbage collection. - -Note that this value configures the maximum number of V8 contexts that can be -used in parallel. Upon server start only as many V8 contexts will be created as -are configured in option `--javascript.v8-contexts-minimum`. The actual number of -available V8 contexts may float at runtime between `--javascript.v8-contexts-minimum` -and `--javascript.v8-contexts`. When there are unused V8 contexts that linger around, -the server's garbage collector thread will automatically delete them. - -`--javascript.v8-contexts-minimum number` - -Specifies the minimum *number* of V8 contexts that will be present at any time -the server is running. The actual number of V8 contexts will never drop below this -value, but it may go up as high as specified via the option `--javascript.v8-contexts`. - -When there are unused V8 contexts that linger around and the number of V8 contexts -is greater than `--javascript.v8-contexts-minimum` the server's garbage collector -thread will automatically delete them. - -`--javascript.v8-contexts-max-invocations` - -Specifies the maximum number of invocations after which a used V8 context is -disposed. The default value of `--javascript.v8-contexts-max-invocations` is 0, -meaning that the maximum number of invocations per context is unlimited. - -`--javascript.v8-contexts-max-age` - -Specifies the time duration (in seconds) after which time a V8 context is disposed -automatically after its creation. If the time is elapsed, the context will be disposed. -The default value for `--javascript.v8-contexts-max-age` is 60 seconds. - -If both `--javascript.v8-contexts-max-invocations` and `--javascript.v8-contexts-max-age` -are set, then the context will be destroyed when either of the specified threshold -values is reached. - -## Garbage collection frequency (time-based) - -`--javascript.gc-frequency frequency` - -Specifies the frequency (in seconds) for the automatic garbage collection of -JavaScript objects. This setting is useful to have the garbage collection still -work in periods with no or little numbers of requests. 
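Pulling the V8 context and garbage collection options above together, a hedged configuration-file sketch (the numbers are purely illustrative, not recommendations):

```
[javascript]
v8-contexts = 16
v8-contexts-minimum = 4
v8-contexts-max-invocations = 200
v8-contexts-max-age = 120
gc-frequency = 60
```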
- -## Garbage collection interval (request-based) - -`--javascript.gc-interval interval` - -Specifies the interval (approximately in number of requests) that the garbage -collection for JavaScript objects will be run in each thread. - -## V8 options - -`--javascript.v8-options options` - -Optional arguments to pass to the V8 Javascript engine. The V8 engine will run -with default settings unless explicit options are specified using this -option. The options passed will be forwarded to the V8 engine which will parse -them on its own. Passing invalid options may result in an error being printed on -stderr and the option being ignored. - -Options need to be passed in one string, with V8 option names being prefixed -with double dashes. Multiple options need to be separated by whitespace. To get -a list of all available V8 options, you can use the value *"--help"* as follows: - -``` ---javascript.v8-options="--help" -``` - -Another example of specific V8 options being set at startup: - -``` ---javascript.v8-options="--log" -``` - -Names and features or usable options depend on the version of V8 being used, and -might change in the future if a different version of V8 is being used in -ArangoDB. Not all options offered by V8 might be sensible to use in the context -of ArangoDB. Use the specific options only if you are sure that they are not -harmful for the regular database operation. - -### Enable or Disable V8 JavaScript Engine entirely - -``` ---javascript.enabled bool -``` - -In certain types of ArangoDB instances you can now completely disable the V8 -JavaScript engine. Be aware that this is an **highly experimental** feature and -it is to be expected that certain functionality (e.g. some API endpoints, the -WebUI, some AQL functions etc) will be missing or severely broken. Nevertheless -you may wish to reduce the footprint of ArangoDB by disabling V8. - -This option is expected to **only** work reliably on a _single server_, _DBServer_, -or _agency_. Do not try to use this feature on a _coordinator_ or in the _ActiveFailover_ setup. - -### Copy JavaScript Installation files - -``` ---javascript.copy-installation bool -``` - -Copy contents of 'javascript.startup-directory' on first start of the server. This option -is intended to be useful for _rolling upgrades_. Setting this to _true_ means that you can -upgrade the underlying ArangoDB packages, without influencing the running _arangod_ instance. -Setting this value does only make sense if you use ArangoDB outside of a container solution, -like Docker, Kubernetes, etc. diff --git a/Documentation/Books/Manual/Programs/Arangod/Ldap.md b/Documentation/Books/Manual/Programs/Arangod/Ldap.md deleted file mode 100644 index 56c14ca89300..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Ldap.md +++ /dev/null @@ -1,502 +0,0 @@ -# ArangoDB Server LDAP Options - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -## Basics Concepts - -The basic idea is that one can keep the user authentication setup for -an ArangoDB instance (single or cluster) outside of ArangoDB in an LDAP -server. A crucial feature of this is that one can add and withdraw users -and permissions by only changing the LDAP server and in particular -without touching the ArangoDB instance. Changes will be effective in -ArangoDB within a few minutes. 
- -Since there are many different possible LDAP setups, we must support a -variety of possibilities for authentication and authorization. Here is -a short overview: - -To map ArangoDB user names to LDAP users there are two authentication -methods called "simple" and "search". In the "simple" method the LDAP bind -user is derived from the ArangoDB user name by prepending a prefix and -appending a suffix. For example, a user "alice" could be mapped to the -distinguished name `uid=alice,dc=arangodb,dc=com` to perform the LDAP -bind and authentication. -See [Simple authentication method](#simple-authentication-method) -below for details and configuration options. - -In the "search" method there are two phases. In Phase 1 a generic -read-only admin LDAP user account is used to bind to the LDAP server -first and search for an LDAP user matching the ArangoDB user name. In -Phase 2, the actual authentication is then performed against the LDAP -user that was found in phase 1. Both methods are sensible and are -recommended to use in production. -See [Search authentication method](#search-authentication-method) -below for details and configuration options. - -Once the user is authenticated, there are now two methods for -authorization: (a) "roles attribute" and (b) "roles search". - -In method (a) ArangoDB acquires a list of roles the authenticated LDAP -user has from the LDAP server. The actual access rights to databases -and collections for these roles are configured in ArangoDB itself. -The user effectively has the union of all access rights of all roles -he has. This method is probably the most common one for production use -cases. It combines the advantages of managing users and roles outside of -ArangoDB in the LDAP server with the fine grained access control within -ArangoDB for the individual roles. See [Roles attribute](#roles-attribute) -below for details about method (a) and for the associated configuration -options. - -Method (b) is very similar and only differs from (a) in the way the -actual list of roles of a user is derived from the LDAP server. -See [Roles search](#roles-search) below for details about method (b) -and for the associated configuration options. - - -Fundamental options -------------------- - -The fundamental options for specifying how to access the LDAP server are -the following: - - - `--ldap.enabled` this is a boolean option which must be set to - `true` to activate the LDAP feature - - `--ldap.server` is a string specifying the host name or IP address - of the LDAP server - - `--ldap.port` is an integer specifying the port the LDAP server is - running on, the default is *389* - - `--ldap.basedn` specifies the base distinguished name under which - the search takes place (can alternatively be set via `--ldap.url`) - - `--ldap.binddn` and `--ldap.bindpasswd` are distinguished name and - password for a read-only LDAP user to which ArangoDB can bind to - search the LDAP server. Note that it is necessary to configure these - for both the "simple" and "search" authentication methods, since - even in the "simple" method, ArangoDB occasionally has to refresh - the authorization information from the LDAP server - even if the user session persists and no new authentication is - needed! It is, however, allowed to leave both empty, but then the - LDAP server must be readable with anonymous access. - - `--ldap.refresh-rate` is a floating point value in seconds. 
The - default is 300, which means that ArangoDB will refresh the - authorization information for authenticated users after at most 5 - minutes. This means that changes in the LDAP server like removed - users or added or removed roles for a user will be effective after - at most 5 minutes. - -Note that the `--ldap.server` and `--ldap.port` options can -alternatively be specified in the `--ldap.url` string together with -other configuration options. For details see Section "LDAP URLs" below. - -Here is an example on how to configure the connection to the LDAP server, -with anonymous bind: - - --ldap.enabled=true \ - --ldap.server=ldap.arangodb.com \ - --ldap.basedn=dc=arangodb,dc=com - -With this configuration ArangoDB binds anonymously to the LDAP server -on host `ldap.arangodb.com` on the default port 389 and executes all searches -under the base distinguished name `dc=arangodb,dc=com`. - -If we need a user to read in LDAP here is the example for it: - - --ldap.enabled=true \ - --ldap.server=ldap.arangodb.com \ - --ldap.basedn=dc=arangodb,dc=com \ - --ldap.binddn=uid=arangoadmin,dc=arangodb,dc=com \ - --ldap.bindpasswd=supersecretpassword - -The connection is identical but the searches will be executed with the -given distinguished name in `binddn`. - -Note here: -The given user (or the anonymous one) needs at least read access on -all user objects to find them and in the case of Roles search -also read access on the objects storing the roles. - -Up to this point ArangoDB can now connect to a given LDAP server -but it is not yet able to authenticate users properly with it. -For this pick one of the following two authentication methods. - -### LDAP URLs - -As an alternative one can specify the values of multiple LDAP related configuration -options by specifying a single LDAP URL. Here is an example: - - --ldap.url ldap://ldap.arangodb.com:1234/dc=arangodb,dc=com?uid?sub - -This one option has the combined effect of setting the following: - - --ldap.server=ldap.arangodb.com \ - --ldap.port=1234 \ - --ldap.basedn=dc=arangodb,dc=com \ - --ldap.searchAttribute=uid \ - --ldap.searchScope=sub - -That is, the LDAP URL consists of the LDAP *server* and *port*, a *basedn*, a -*search attribute* and a *scope* which can be one of *base*, *one* or -*sub*. There is also the possibility to use the `ldaps` protocol as in: - - --ldap.url ldaps://ldap.arangodb.com:636/dc=arangodb,dc=com?uid?sub - -This does exactly the same as the one above, except that it uses the -LDAP over TLS protocol. This is a non-standard method which does not -involve using the STARTTLS protocol. Note that this does not work in the -Windows version! We suggest to use the `ldap` protocol and STARTTLS -as described in the next section. - -### TLS options - -{% hint 'warning' %} -TLS is not supported in the Windows version of ArangoDB! -{% endhint %} - -To configure the usage of encrypted TLS to communicate with the LDAP server -the following options are available: - - - `--ldap.tls`: the main switch to active TLS. can either be - `true` (use TLS) or `false` (do not use TLS). It is switched - off by default. If you switch this on and do not use the `ldaps` - protocol via the [LDAP URL](#ldap-urls), then ArangoDB - will use the `STARTTLS` protocol to initiate TLS. This is the - recommended approach. - - `--ldap.tls-version`: the minimal TLS version that ArangoDB should accept. - Available versions are `1.0`, `1.1` and `1.2`. The default is `1.2`. If - your LDAP server does not support Version 1.2, you have to change - this setting. 
- - `--ldap.tls-cert-check-strategy`: strategy to validate the LDAP server - certificate. Available strategies are `never`, `hard`, - `demand`, `allow` and `try`. The default is `hard`. - - `--ldap.tls-cacert-file`: a file path to one or more (concatenated) - certificate authority certificates in PEM format. - As default no file path is configured. This certificate - is used to validate the server response. - - `--ldap.tls-cacert-dir`: a directory path to certificate authority certificates in - [c_rehash](https://www.openssl.org/docs/man1.0.2/apps/c_rehash.html) - format. As default no directory path is configured. - -Assuming you have the TLS CAcert file that is given to the server at -`/path/to/certificate.pem`, here is an example on how to configure TLS: - - - --ldap.tls true \ - --ldap.tls-cacert-file /path/to/certificate.pem - -You can use TLS with any of the following authentication mechanisms. - -### Esoteric options - -The following options can be used to configure advanced options for LDAP -connectivity: - - - `--ldap.serialized`: whether or not calls into the underlying LDAP library should be serialized. - This option can be used to work around thread-unsafe LDAP library functionality. - - `--ldap.serialize-timeout`: sets the timeout value that is used when waiting to enter the - LDAP library call serialization lock. This is only meaningful when `--ldap.serialized` has been - set to `true`. - - `--ldap.retries`: number of tries to attempt a connection. Setting this to values greater than - one will make ArangoDB retry to contact the LDAP server in case no connection can be made - initially. - -Please note that some of the following options are platform-specific and may not work -with all LDAP servers reliably: - - - `--ldap.restart`: whether or not the LDAP library should implicitly restart connections - - `--ldap.referrals`: whether or not the LDAP library should implicitly chase referrals - -The following options can be used to adjust the LDAP configuration on Linux and macOS -platforms only, but will not work on Windows: - - - `--ldap.debug`: turn on internal OpenLDAP library output (warning: will print to stdout). - - `--ldap.timeout`: timeout value (in seconds) for synchronous LDAP API calls (a value of 0 - means default timeout). - - `--ldap.network-timeout`: timeout value (in seconds) after which network operations - following the initial connection return in case of no activity (a value of 0 means default timeout). - - `--ldap.async-connect`: whether or not the connection to the LDAP library will be done - asynchronously. - -## Authentication methods - -In order to authenticate users in LDAP we have two options available. -We need to pick exactly one them. - -### Simple authentication method - -The simple authentication method is used if and only if both the -`--ldap.prefix` and `--ldap.suffix` configuration options are specified -and are non-empty. In all other cases the -["search" authentication method](#search-authentication-method) is used. - -In the "simple" method the LDAP bind user is derived from the ArangoDB -user name by prepending the value of the `--ldap.prefix` configuration -option and by appending the value of the `--ldap.suffix` configuration -option. For example, an ArangoDB user "alice" would be mapped to the -distinguished name `uid=alice,dc=arangodb,dc=com` to perform the LDAP -bind and authentication, if `--ldap.prefix` is set to `uid=` and -`--ldap.suffix` is set to `,dc=arangodb,dc=com`. 
- -ArangoDB binds to the LDAP server and authenticates with the -distinguished name and the password provided by the client. If -the LDAP server successfully verifies the password then the user is -authenticated. - -If you want to use this method add the following example to your -ArangoDB configuration together with the fundamental configuration: - - --ldap.prefix uid= \ - --ldap.suffix ,dc=arangodb,dc=com - -This method will authenticate an LDAP user with the distinguished name -`{PREFIX}{USERNAME}{SUFFIX}`, in this case for the arango user `alice` -it will search for: `uid=alice,dc=arangodb,dc=com`. -This distinguished name will be used as `{{USER}}` for the roles later on. - -### Search authentication method - -The search authentication method is used if at least one of the two -options `--ldap.prefix` and `--ldap.suffix` is empty or not specified. -ArangoDB uses the LDAP user credentials given by the `--ldap.binddn` and -`--ldap.bindpasswd` to perform a search for LDAP users. -In this case, the values of the options `--ldap.basedn`, -`--ldap.search-attribute`, `--ldap.search-filter` and `--ldap.search-scope` -are used in the following way: - - - `--ldap.search-scope` is an LDAP search scope with possible values - `base` (just search the base distinguished name), - `sub` (recursive search under the base distinguished name) or - `one` (search the base's immediate children) (default: `sub`) - - `--ldap.search-filter` is an LDAP filter expression which limits the - set of LDAP users being considered (default: `objectClass=*` which - means all objects) - - `--ldap.search-attribute` specifies the attribute in the user objects - which is used to match the ArangoDB user name (default: `uid`) - -Here is an example on how to configure the search method. -Assume we have users like the following stored in LDAP: - - dn: uid=alice,dc=arangodb,dc=com - uid: alice - objectClass: inetOrgPerson - objectClass: organizationalPerson - objectClass: top - objectClass: person - -Where `uid` is the username used in ArangoDB, and we only search -for objects of type `person` then we can add the following to our -fundamental LDAP configuration: - - --ldap.search-attribute=uid \ - --ldap.search-filter=objectClass=person - -This will use the `sub` search scope by default and will find -all `person` objects where the `uid` is equal to the given username. -From these the `dn` will be extracted and used as `{{USER}}` in -the roles later on. - -## Fetching roles for a user - -After authentication, the next step is to derive authorization -information from the authenticated LDAP user. -In order to fetch the roles and thereby the access rights -for a user we again have two possible options and need to pick -one of them. We can combine each authentication method -with each role method. -In any case a user can have no role or more than one. -If a user has no role the user will not get any access -to ArangoDB at all. -If a user has multiple roles with different rights -then the rights will be combined and the `strongest` -right will win. Example: - -- `alice` has the roles `project-a` and `project-b`. -- `project-a` has no access to collection `BData`. -- `project-b` has `rw` access to collection `BData`, -- hence `alice` will have `rw` on `BData`. - -Note that the actual database and collection access rights -will be configured in ArangoDB itself by roles in the users module. -The role name is always prefixed with `:role:`, e.g.: `:role:project-a` -and `:role:project-b` respectively. 
You can use the normal user -permissions tools in the Web interface or `arangosh` to configure these. - -### Roles attribute - -The most important method for this is to read off the roles an LDAP -user is associated with from an attribute in the LDAP user object. -If the configuration option - - --ldap.roles-attribute-name - -configuration option is set, then the value of that -option is the name of the attribute being used. - -Here is the example to add to the overall configuration: - - --ldap.roles-attribute-name=role - -If we have the user stored like the following in LDAP: - - dn: uid=alice,dc=arangodb,dc=com - uid: alice - objectClass: inetOrgPerson - objectClass: organizationalPerson - objectClass: top - objectClass: person - role: project-a - role: project-b - -Then the request will grant the roles `project-a` and `project-b` -for the user `alice` after successful authentication, -as they are stored within the `role` on the user object. - -### Roles search - -An alternative method for authorization is to conduct a search in the -LDAP server for LDAP objects representing roles a user has. If the -configuration option - - --ldap.roles-search= - -is given, then the string `{USER}` in `` is replaced -with the distinguished name of the authenticated LDAP user and the -resulting search expression is used to match distinguished names of -LDAP objects representing roles of that user. - -Example: - - --ldap.roles-search '(&(objectClass=groupOfUniqueNames)(uniqueMember={USER}))' - -After a LDAP user was found and authenticated as described in the -authentication section above the `{USER}` in the search expression -will be replaced by its distinguished name, e.g. `uid=alice,dc=arangodb,dc=com`, -and thus with the above search expression the actual search expression -would end up being: - - (&(objectClass=groupOfUniqueNames)(uniqueMember=uid=alice,dc=arangodb,dc=com})) - -This search will find all objects of `groupOfUniqueNames` where -at least one `uniqueMember` has the `dn` of `alice`. -The list of results of that search would be the list of roles given by -the values of the `dn` attributes of the found role objects. - -### Role transformations and filters - -For both of the above authorization methods there are further -configuration options to tune the role lookup. In this section we -describe these further options: - - - `--ldap.roles-include` can be used to specify a regular expression - that is used to filter roles. Only roles that match the regular - expression are used. - - - `--ldap.roles-exclude` can be used to specify a regular expression - that is used to filter roles. Only roles that do not match the regular - expression are used. - - - `--ldap.roles-transformation` can be used to specify a regular - expression and replacement text as `/re/text/`. This regular - expression is applied to the role name found. This is especially - useful in the roles-search variant to extract the real role name - out of the `dn` value. - - - `--ldap.superuser-role` can be used to specify the role associated - with the superuser. Any user belonging to this role gains superuser - status. This role is checked after applying the roles-transformation - expression. - -Example: - - --ldap.roles-include "^arangodb" - -will only consider roles that start with `arangodb`. - - --ldap.roles-exclude=disabled - -will only consider roles that do contain the word `disabled`. - - --ldap.superuser-role "arangodb-admin" - -anyone belonging to the group "arangodb-admin" will become a superuser. 
- -The roles-transformation deserves a larger example. Assume we are using -roles search and have stored roles in the following way: - - dn: cn=project-a,dc=arangodb,dc=com - objectClass: top - objectClass: groupOfUniqueNames - uniqueMember: uid=alice,dc=arangodb,dc=com - uniqueMember: uid=bob,dc=arangodb,dc=com - cn: project-a - description: Internal project A - - dn: cn=project-b,dc=arangodb,dc=com - objectClass: top - objectClass: groupOfUniqueNames - uniqueMember: uid=alice,dc=arangodb,dc=com - uniqueMember: uid=charlie,dc=arangodb,dc=com - cn: project-b - description: External project B - -In this case we will find `cn=project-a,dc=arangodb,dc=com` as one -role of `alice`. However we actually want to configure a role name: -`:role:project-a` which is easier to read and maintain for our -administrators. - -If we now apply the following transformation: - - --ldap.roles-transformation=/^cn=([^,]*),.*$/$1/ - -The regex will extract out `project-a` resp. `project-b` of the -`dn` attribute. - -In combination with the `superuser-role` we could make all -`project-a` members arangodb admins by using: - - --ldap.roles-transformation=/^cn=([^,]*),.*$/$1/ \ - --ldap.superuser-role=project-a - - -## Complete configuration examples - -In this section we would like to present complete examples -for a successful LDAP configuration of ArangoDB. -All of the following are just combinations of the details described above. - -**Simple authentication with role-search, using anonymous LDAP user** - -This example connects to the LDAP server with an anonymous read-only -user. We use the simple authentication mode (`prefix` + `suffix`) -to authenticate users and apply a role search for `groupOfUniqueNames` objects -where the user is a `uniqueMember`. Furthermore we extract only the `cn` -out of the distinguished role name. - - --ldap.enabled=true \ - --ldap.server=ldap.arangodb.com \ - --ldap.basedn=dc=arangodb,dc=com \ - --ldap.prefix uid= \ - --ldap.suffix ,dc=arangodb,dc=com \ - --ldap.roles-search '(&(objectClass=groupOfUniqueNames)(uniqueMember={USER}))' \ - --ldap.roles-transformation=/^cn=([^,]*),.*$/$1/ \ - --ldap.superuser-role=project-a - -**Search authentication with roles attribute using LDAP admin user having TLS enabled** - -This example connects to the LDAP server with a given distinguished name of an -admin user + password. -Furthermore we activate TLS and give the certificate file to validate server responses. -We use the search authentication searching for the `uid` attribute of `person` objects. -These `person` objects have `role` attribute(s) containing the role(s) of a user. - - --ldap.enabled=true \ - --ldap.server=ldap.arangodb.com \ - --ldap.basedn=dc=arangodb,dc=com \ - --ldap.binddn=uid=arangoadmin,dc=arangodb,dc=com \ - --ldap.bindpasswd=supersecretpassword \ - --ldap.tls true \ - --ldap.tls-cacert-file /path/to/certificate.pem \ - --ldap.search-attribute=uid \ - --ldap.search-filter=objectClass=person \ - --ldap.roles-attribute-name=role diff --git a/Documentation/Books/Manual/Programs/Arangod/Log.md b/Documentation/Books/Manual/Programs/Arangod/Log.md deleted file mode 100644 index e43303cdec7b..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Log.md +++ /dev/null @@ -1,211 +0,0 @@ -# ArangoDB Server Log Options - -## Log levels and topics - -ArangoDB's log output is grouped into topics. `--log.level` can be specified -multiple times at startup, for as many topics as needed. The log verbosity and -output files can be adjusted per log topic. 
For example - -``` ---log.level startup=trace --log.level queries=trace --log.level info -``` - -will log messages concerning startup at trace level, AQL queries at trace level -and everything else at info level. - -In a configuration file, it is written like this: - -``` -[log] -level = startup=trace -level = queries=trace -level = info -``` - -The available log levels are: - -- `fatal`: only logs fatal errors -- `error`: only logs errors -- `warning`: only logs warnings and errors -- `info`: logs information messages, warnings and errors -- `debug`: logs debug and information messages, warnings and errors -- `trace`: logs trace, debug and information messages, warnings and errors - -Note that levels `debug` and `trace` will be very verbose. - -See [Log Levels](../../Monitoring/LogLevels.md) in the Monitoring chapter for a -detailed description of the different levels. - -Some relevant log topics available in ArangoDB 3 are: - -- `agency`: information about the agency -- `collector`: information about the WAL collector's state -- `compactor`: information about the collection datafile compactor -- `datafiles`: datafile-related operations -- `mmap`: information about memory-mapping operations (including msync) -- `performance`: performance-related messages -- `queries`: executed AQL queries, slow queries -- `replication`: replication-related info -- `requests`: HTTP requests -- `startup`: information about server startup and shutdown -- `threads`: information about threads - -See more [log levels](../../../HTTP/AdministrationAndMonitoring/index.html#modify-and-return-the-current-server-log-level) - -### Log outputs - -The log option `--log.output ` allows directing the global -or per-topic log output to different outputs. The output definition `` -can be one of - -- `-` for stdin -- `+` for stderr -- `syslog://` -- `syslog:///` -- `file://` - -The option can be specified multiple times in order to configure the output -for different log topics. To set up a per-topic output configuration, use -`--log.output =`, e.g. - - queries=file://queries.txt - -logs all queries to the file "queries.txt". - -The old option `--log.file` is still available in 3.0 for convenience reasons. In -3.0 it is a shortcut for the more general option `--log.output file://filename`. - -The old option `--log.requests-file` is still available in 3.0. It is now a shortcut -for the more general option `--log.output requests=file://...`. - -Using `--log.output` also allows directing log output to different files based on -topics. For example, to log all AQL queries to a file "queries.log" one can use the -options: - -``` ---log.level queries=trace --log.output queries=file:///path/to/queries.log -``` - -To additionally log HTTP request to a file named "requests.log" add the options: - -``` ---log.level requests=info --log.output requests=file:///path/to/requests.log -``` - -If you specify `--log.file-mode octalvalue` then any newly created log -file will use "octalvalue" as file mode. Please note that the `umask` -value will be applied as well. - -If you specify `--log.file-group name` then any newly created log file -will try to use "name" as group name. Please note that you have to be -a member of that group. Otherwise the group ownership will not be -changed. Please note that this option is only available under Linux -and Mac. It is not available under Windows. - -### Forcing direct output - -The option `--log.force-direct` can be used to disable logging in an extra -logging thread. 
If set to `true`, any log messages are immediately printed in the -thread that triggered the log message. This is non-optimal for performance but -can aid debugging. If set to `false`, log messages are handed off to an extra -logging thread, which asynchronously writes the log messages. - -### Time format - -The option `--log.time-format` controls the time format used in log output. -The possible values for this option are: - -Format | Example | Description -:-----------------------|:------------------------ |:----------- -`timestamp` | 1553766923000 | unix timestamps, in seconds -`timestamp-millis` | 1553766923000.123 | unix timestamps, in seconds, with millisecond precision -`timestamp-micros` | 1553766923000.123456 | unix timestamps, in seconds, with microsecond precision -`uptime` | 987654 | seconds since server start -`uptime-millis` | 987654.123 | seconds since server start, with millisecond precision -`uptime-micros` | 987654.123456 | seconds since server start, with microsecond precision -`utc-datestring` | 2019-03-28T09:55:23Z | UTC-based date and time in format YYYY-MM-DDTHH:MM:SSZ -`utc-datestring-millis` | 2019-03-28T09:55:23.123Z | like `utc-datestring`, but with millisecond precision -`local-datestring` | 2019-03-28T10:55:23 | local date and time in format YYYY-MM-DDTHH:MM:SS - -### Escaping - -`--log.escape value` - -This option toggles the escaping of log output. - -If set to `true`, the following characters in the log output are escaped: - -* the carriage return character (hex 0d) -* the newline character (hex 0a) -* the tabstop character (hex 09) -* any other characters with an ordinal value less than hex 20 - -If the option is set to `false`, no characters are escaped. Characters with -an ordinal value less than hex 20 will not be printed in this mode but will -be replaced with a space character (hex 20). - -A side effect of turning off the escaping is that it will reduce the CPU -overhead for the logging. However, this will only be noticeable when logging -is set to a very verbose level (e.g. debug or trace). - -The default value for this option is `true`. - -### Color logging - -`--log.color value` - -Logging to terminal output is by default colored. Colorful logging can be -turned off by setting the value to false. - -### Source file and Line number - -Log line number: `--log.line-number` - -Normally, if an human readable fatal, error, warning or info message is -logged, no information about the file and line number is provided. The -file and line number is only logged for debug and trace message. This option -can be use to always log these pieces of information. - -### Prefix - -Log prefix: `--log.prefix prefix` - -This option is used specify an prefix to logged text. - -### Threads - -Log thread identifier: `--log.thread true` - -Whenever log output is generated, the process ID is written as part of the -log information. Setting this option appends the thread id of the calling -thread to the process id. For example, - -``` -2010-09-20T13:04:01Z [19355] INFO ready for business -``` - -when no thread is logged and - -``` -2010-09-20T13:04:17Z [19371-18446744072487317056] ready for business -``` - -when this command line option is set. - -To also log thread names, it is possible to set the `--log.thread-name` -option. By default `--log.thread-name` is set to `false`. - -### Role - -Log role: `--log.role true` - -When set to `true`, this option will make the ArangoDB logger print a single -character with the server's role into each logged message. 
The roles are: - -- U: undefined/unclear (used at startup) -- S: single server -- C: coordinator -- P: primary -- A: agent - -The default value for this option is `false`, so no roles will be logged. diff --git a/Documentation/Books/Manual/Programs/Arangod/Nonce.md b/Documentation/Books/Manual/Programs/Arangod/Nonce.md deleted file mode 100644 index 374f8ab0f4a2..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Nonce.md +++ /dev/null @@ -1,7 +0,0 @@ -# ArangoDB Server Nonce Options - -## Size - -`nonce.size` - -The size of the hash array for nonces. diff --git a/Documentation/Books/Manual/Programs/Arangod/Options.md b/Documentation/Books/Manual/Programs/Arangod/Options.md deleted file mode 100644 index 3638d17aa8ef..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Options.md +++ /dev/null @@ -1,29 +0,0 @@ -ArangoDB Server Options -======================= - -Usage: `arangod []` - -The database directory can be specified as positional (unnamed) first parameter: - - arangod /path/to/datadir - -Or explicitly as named parameter: - - arangod --database.directory /path/to/datadir - -All other parameters need to be passed as named parameters. -That is two hyphens followed by the option name, an equals sign or a space and -finally the parameter value. The value needs to be wrapped in double quote marks -if the value contains whitespace. Extra whitespace around `=` is allowed: - - arangod --database.directory = "/path with spaces/to/datadir" - -See [Configuration](../../Administration/Configuration/README.md) -if you want to translate startup parameters to configuration files -or learn more about startup options in general. - -See -[Fetch Current Configuration Options](../../Administration/Configuration/README.md#fetch-current-configuration-options) -if you want to query the `arangod` server for the current settings at runtime. - -@startDocuBlock program_options_arangod diff --git a/Documentation/Books/Manual/Programs/Arangod/Query.md b/Documentation/Books/Manual/Programs/Arangod/Query.md deleted file mode 100644 index 08928e0a6f51..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Query.md +++ /dev/null @@ -1,146 +0,0 @@ -# ArangoDB Server Query Options - -## Limiting memory for AQL queries - -`--query.memory-limit value` - -The default maximum amount of memory (in bytes) that a single AQL query can use. -When a single AQL query reaches the specified limit value, the query will be -aborted with a *resource limit exceeded* exception. In a cluster, the memory -accounting is done per shard, so the limit value is effectively a memory limit per -query per shard. - -The global limit value can be overridden per query by setting the *memoryLimit* -option value for individual queries when running an AQL query. - -The default value is *0*, meaning that there is no memory limit. - -## Turning AQL warnings into errors - -`--query.fail-on-warning value` - -When set to *true*, AQL queries that produce warnings will instantly abort and -throw an exception. This option can be set to catch obvious issues with AQL -queries early. When set to *false*, AQL queries that produce warnings will not -abort and return the warnings along with the query results. -The option can also be overridden for each individual AQL query. - -## Enable/disable AQL query tracking - -`--query.tracking flag` - -If *true*, the server's AQL slow query tracking feature will be enabled by -default. Tracking of queries can be disabled by setting the option to *false*. - -The default is *true*. 
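
Like other startup options, these query options can also be set permanently in the configuration file, using the same `[section]` mapping shown for the log options earlier. A minimal sketch (the values are illustrative, not recommendations):

```
[query]
memory-limit = 0
fail-on-warning = false
tracking = true
```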
- -## Enable/disable tracking of bind variables in AQL queries - -`--query.tracking-with-bindvars flag` - -If *true*, then the bind variables will be tracked and shown for all running -and slow AQL queries. When set to *true*, this will also enable the display of -bind variable values in the list of cached AQL query results. -This option only has an effect if `--query.tracking` was set to *true* or when -the query results cache is used. -Tracking and displaying bind variable values can be disabled by setting the option to *false*. - -The default is *true*. - -## Threshold for slow AQL queries - -`--query.slow-threshold value` - -By setting *value* it can be controlled after what execution time an AQL query -is considered "slow". Any slow queries that exceed the execution time specified -in *value* will be logged when they are finished. The threshold value is -specified in seconds. Tracking of slow queries can be turned off entirely by -setting the option `--query.tracking` to *false*. - -The default value is *10.0*. - -`--query.slow-streaming-threshold value` - -By setting *value* it can be controlled after what execution time streaming AQL -queries are considered "slow". This option exists to give streaming queries a -separate, potentially higher timeout value than regular queries. Streaming queries -are often executed in lockstep with application data processing logic, which then -also accounts for the queries' runtime. It is thus not unexpected if streaming -queries' lifetime is longer than the one of regular queries. - -The default value is *10.0*. - -## Limiting the number of query execution plans created by the AQL optimizer - -`--query.optimizer-max-plans value` - -By setting *value* it can be controlled how many different query execution plans -the AQL query optimizer will generate at most for any given AQL query. Normally -the AQL query optimizer will generate a single execution plan per AQL query, but -there are some cases in which it creates multiple competing plans. More plans -can lead to better optimized queries, however, plan creation has its costs. The -more plans are created and shipped through the optimization pipeline, the more -time will be spent in the optimizer. -Lowering *value* will make the optimizer stop creating additional plans when it -has already created enough plans. -Note that this setting controls the default maximum number of plans to create. The -value can still be adjusted on a per-query basis by setting the *maxNumberOfPlans* -attribute when running a query. - -The default value is *128*. - -## AQL Query results caching mode - -`--query.cache-mode` - -Toggles the AQL query results cache behavior. Possible values are: - -* *off*: do not use query results cache -* *on*: always use query results cache, except for queries that have their *cache* - attribute set to *false* -* *demand*: use query results cache only for queries that have their *cache* - attribute set to *true* - -## AQL Query results cache size - -`--query.cache-entries` - -Maximum number of query results that can be stored per database-specific query -results cache. If a query is eligible for caching and the number of items in the -database's query cache is equal to this threshold value, another cached query -result will be removed from the cache. - -This option only has an effect if the query cache mode is set to either *on* or -*demand*. - -The default value is *128*. 
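
For instance, to always use the query results cache and raise the number of cached results per database, both options can be combined at startup. A sketch with illustrative values (`/path/to/datadir` is a placeholder):

```
arangod --query.cache-mode on \
        --query.cache-entries 256 \
        /path/to/datadir
```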
- -`--query.cache-entries-max-size` - -Maximum cumulated size of query results that can be stored per database-specific -query results cache. When inserting a query result into the query results cache, -it is check if the total size of cached results would exceed this value, and if so, -another cached query result will be removed from the cache before inserting a new -one. - -This option only has an effect if the query cache mode is set to either *on* or -*demand*. - -The default value is *256 MB*. - -`--query.cache-entry-max-size` - -Maximum size of individual query results that can be stored in any database's query -results cache. Query results are only eligible for caching when their size does not exceed -this setting's value. - -The default value is *16 MB*. - -`--query.cache-include-system-collections` - -Whether or not to store results of queries that involve system collections in -the query results cache. Not storing these results is normally beneficial when using the -query results cache, as queries on system collections are internal to ArangoDB and will -only use space in the query results cache unnecessarily. - -The default value is *false*. diff --git a/Documentation/Books/Manual/Programs/Arangod/README.md b/Documentation/Books/Manual/Programs/Arangod/README.md deleted file mode 100644 index ec79701ca1a5..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/README.md +++ /dev/null @@ -1,8 +0,0 @@ -ArangoDB Server -=============== - -The ArangoDB daemon (_arangod_) is the central server binary, which can run in -different modes for a variety of setups like single server and clusters. - -See [Administration](../../Administration/README.md) for server configuration -and [Deployment](../../Deployment/README.md) for operation mode details. diff --git a/Documentation/Books/Manual/Programs/Arangod/Random.md b/Documentation/Books/Manual/Programs/Arangod/Random.md deleted file mode 100644 index ebae1fca11af..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Random.md +++ /dev/null @@ -1,7 +0,0 @@ -# ArangoDB Server Random Options - -## Random Number Generator - -`random.generator` - -Defines the type of random number generator to use. diff --git a/Documentation/Books/Manual/Programs/Arangod/Replication.md b/Documentation/Books/Manual/Programs/Arangod/Replication.md deleted file mode 100644 index 4ea870d29275..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Replication.md +++ /dev/null @@ -1,7 +0,0 @@ -# ArangoDB Server Replication Options - -## Active failover - -`replication.active-failover` - -Enable active-failover during asynchronous replication. diff --git a/Documentation/Books/Manual/Programs/Arangod/Rocksdb.md b/Documentation/Books/Manual/Programs/Arangod/Rocksdb.md deleted file mode 100644 index b83d2f399677..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Rocksdb.md +++ /dev/null @@ -1,324 +0,0 @@ -# ArangoDB Server RocksDB Options - -RocksDB is a highly configurable key-value store used to power our RocksDB -storage engine. Most of the options on this page are pass-through options to the -underlying RocksDB instance, and we change very few of their default settings. - -Depending on the [storage engine you have chosen](Server.md#storage-engine) -the availability and the scope of these options changes. - -In case you have chosen `mmfiles` some of the following options apply to -persistent indexes. -In case of `rocksdb` it will apply to all data stored as well as indexes. 
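
If you are unsure which storage engine a deployment actually uses, and therefore which of the following options apply, you can ask a running server from _arangosh_. A quick sketch, assuming a local server on the default endpoint:

```
arangosh --server.endpoint tcp://127.0.0.1:8529 \
         --javascript.execute-string 'print(db._engine().name);'
```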
- -## Pass-through options - -`--rocksdb.wal-directory` - -Absolute path for the RocksDB WAL files. If left empty, this will use a -subdirectory `journals` inside the data directory. - -### Write buffers - -`--rocksdb.write-buffer-size` - -The amount of data to build up in each in-memory buffer (backed by a log file) -before closing the buffer and queuing it to be flushed into standard storage. -Default: 64MiB. Larger values may improve performance, especially for bulk -loads. - -`--rocksdb.max-write-buffer-number` - -The maximum number of write buffers that built up in memory. If this number is -reached before the buffers can be flushed, writes will be slowed or stalled. -Default: 2. - -`--rocksdb.total-write-buffer-size` - -The total amount of data to build up in all in-memory buffers (backed by log -files). This option, together with the block cache size configuration option, -can be used to limit memory usage. If set to 0, the memory usage is not limited. - -If set to a value larger than 0, this will cap memory usage for write buffers -but may have an effect on performance. If there is less than 4GiB of RAM on the -system, the default value is 512MiB. If there is more, the default is -`(system RAM size - 2GiB) * 0.5`. - -`--rocksdb.min-write-buffer-number-to-merge` - -Minimum number of write buffers that will be merged together when flushing to -normal storage. Default: 1. - -`--rocksdb.max-total-wal-size` - -Maximum total size of WAL files that, when reached, will force a flush of all -column families whose data is backed by the oldest WAL files. Setting this -to a low value will trigger regular flushing of column family data from memtables, -so that WAL files can be moved to the archive. -Setting this to a high value will avoid regular flushing but may prevent WAL -files from being moved to the archive and being removed. - -`--rocksdb.delayed-write-rate` (Hidden) - -Limited write rate to DB (in bytes per second) if we are writing to the last -in-memory buffer allowed and we allow more than 3 buffers. Default: 16MiB/s. - -### LSM tree structure - -`--rocksdb.num-levels` - -The number of levels for the database in the LSM tree. Default: 7. - -`--rocksdb.num-uncompressed-levels` - -The number of levels that do not use compression. The default value is 2. -Levels above this number will use Snappy compression to reduce the disk -space requirements for storing data in these levels. - -`--rocksdb.dynamic-level-bytes` - -If true, the amount of data in each level of the LSM tree is determined -dynamically so as to minimize the space amplification; otherwise, the level -sizes are fixed. The dynamic sizing allows RocksDB to maintain a well-structured -LSM tree regardless of total data size. Default: true. - -`--rocksdb.max-bytes-for-level-base` - -The maximum total data size in bytes in level-1 of the LSM tree. Only effective -if `--rocksdb.dynamic-level-bytes` is false. Default: 256MiB. - -`--rocksdb.max-bytes-for-level-multiplier` - -The maximum total data size in bytes for level L of the LSM tree can be -calculated as `max-bytes-for-level-base * (max-bytes-for-level-multiplier ^ -(L-1))`. Only effective if `--rocksdb.dynamic-level-bytes` is false. Default: -10. - -`--rocksdb.level0-compaction-trigger` - -Compaction of level-0 to level-1 is triggered when this many files exist in -level-0. Setting this to a higher number may help bulk writes at the expense of -slowing down reads. Default: 2. 
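
To illustrate how the write buffer and level-0 compaction options interact for bulk loads, here is a hedged sketch; the numbers are purely illustrative and depend on available RAM and disk speed:

```
arangod --rocksdb.write-buffer-size 134217728 \
        --rocksdb.max-write-buffer-number 4 \
        --rocksdb.level0-compaction-trigger 4 \
        /path/to/datadir
```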
- -`--rocksdb.level0-slowdown-trigger` - -When this many files accumulate in level-0, writes will be slowed down to -`--rocksdb.delayed-write-rate` to allow compaction to catch up. Default: 20. - -`--rocksdb.level0-stop-trigger` - -When this many files accumulate in level-0, writes will be stopped to allow -compaction to catch up. Default: 36. - -### File I/O - -`--rocksdb.compaction-read-ahead-size` - -If non-zero, we perform bigger reads when doing compaction. If you're running -RocksDB on spinning disks, you should set this to at least 2MiB. That way -RocksDB's compaction is doing sequential instead of random reads. Default: 0. - -`--rocksdb.use-direct-reads` (Hidden) - -Only meaningful on Linux. If set, use `O_DIRECT` for reading files. Default: -false. - -`--rocksdb.use-direct-io-for-flush-and-compaction` (Hidden) - -Only meaningful on Linux. If set, use `O_DIRECT` for writing files. Default: false. - -`--rocksdb.use-fsync` (Hidden) - -If set, issue an `fsync` call when writing to disk (set to false to issue -`fdatasync` only. Default: false. - -`--rocksdb.allow-fallocate` - -Allow RocksDB to use the fallocate call. If false, fallocate calls are bypassed -and no preallocation is done. Preallocation is turned on by default, but can be -turned off for operating system versions that are known to have issues with it. -This option only has an effect on operating systems that support fallocate. - -`--rocksdb.limit-open-files-at-startup` - -If set to true, this will limit the amount of .sst files RocksDB will inspect at -startup, which can reduce the number of IO operations performed at start. - -`--rocksdb.block-align-data-blocks` - -If true, data blocks are aligned on the lesser of page size and block size, -which may waste some memory but may reduce the number of cross-page I/O operations. - -### Background tasks - -`--rocksdb.max-background-jobs` - -Maximum number of concurrent background compaction jobs, submitted to the low -priority thread pool. Default: number of processors. - -`--rocksdb.num-threads-priority-high` - -Number of threads for high priority operations (e.g. flush). We recommend -setting this equal to `max-background-flushes`. Default: number of processors / 2. - -`--rocksdb.num-threads-priority-low` - -Number of threads for low priority operations (e.g. compaction). Default: number of processors / 2. - -### Caching - -`--rocksdb.block-cache-size` - -This is the maximum size of the block cache in bytes. Increasing this may improve -performance. If there is less than 4GiB of RAM on the system, the default value -is 256MiB. If there is more, the default is `(system RAM size - 2GiB) * 0.3`. - -`--rocksdb.enforce-block-cache-size-limit` - -Whether or not the maximum size of the RocksDB block cache is strictly enforced. -This option can be set to limit the memory usage of the block cache to at most the -specified size. If then inserting a data block into the cache would exceed the -cache's capacity, the data block will not be inserted. If the flag is not set, -a data block may still get inserted into the cache. It is evicted later, but the -cache may temporarily grow beyond its capacity limit. - -`--rocksdb.block-cache-shard-bits` - -The number of bits used to shard the block cache to allow concurrent operations. -To keep individual shards at a reasonable size (i.e. at least 512KB), keep this -value to at most `block-cache-shard-bits / 512KB`. Default: `block-cache-size / -2^19`. 
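
If you prefer to cap RocksDB's memory footprint explicitly instead of relying on the RAM-based defaults, the write buffer limit and the block cache options described above can be combined. A sketch with illustrative values:

```
arangod --rocksdb.total-write-buffer-size 1073741824 \
        --rocksdb.block-cache-size 2147483648 \
        --rocksdb.enforce-block-cache-size-limit true \
        /path/to/datadir
```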
- -`--rocksdb.table-block-size` - -Approximate size of user data (in bytes) packed per block for uncompressed data. - -`--rocksdb.recycle-log-file-num` (Hidden) - -If true, keeps a pool of log files around for recycling them. The default -value is false. - -### Miscellaneous - -`--rocksdb.optimize-filters-for-hits` (Hidden) - -This flag specifies that the implementation should optimize the filters mainly -for cases where keys are found rather than also optimize for the case where -keys are not. This would be used in cases where the application knows that -there are very few misses or the performance in the case of misses is not as -important. Default: false. - -`--rocksdb.wal-recovery-skip-corrupted` (Hidden) - -If true, skip corrupted records in WAL recovery. Default: false. - -## Non-Pass-Through Options - -`--rocksdb.wal-file-timeout` - -Timeout after which unused WAL files are deleted (in seconds). Default: 10.0s. - -Data of ongoing transactions is stored in RAM. Transactions that get too big -(in terms of number of operations involved or the total size of data created or -modified by the transaction) will be committed automatically. Effectively this -means that big user transactions are split into multiple smaller RocksDB -transactions that are committed individually. The entire user transaction will -not necessarily have ACID properties in this case. - -The following options can be used to control the RAM usage and automatic -intermediate commits for the RocksDB engine: - -`--rocksdb.wal-file-timeout-initial` (Hidden) - -Timeout after which deletion of unused WAL files kicks in after server start -(in seconds). Default: 180.0s - -By decreasing this option's value, the server will start the removal of obsolete -WAL files earlier after server start. This is useful in testing environments that -are space-restricted and do not require keeping much WAL file data at all. - -`--rocksdb.wal-archive-size-limit` - -Maximum total size (in bytes) of archived WAL files to keep on a leader. -A value of `0` will not restrict the size of the archive, so the leader will -removed archived WAL files when there are no replication clients needing them. -Any non-zero value will restrict the size of the WAL files archive to about the -specified value and trigger WAL archive file deletion once the threshold is reached. -Please note that the value is only a threshold, so the archive may get bigger than -the configured value until the background thread actually deletes files from -the archive. Also note that deletion from the archive will only kick in after -`--rocksdb.wal-file-timeout-initial` seconds have elapsed after server start. - -The default value is `0` (i.e. unlimited). - -When setting the value to a size bigger than 0, the RocksDB storage engine -will force a removal of archived WAL files if the total size of the archive -exceeds the configured size. The option can be used to get rid of archived -WAL files in a disk size-constrained environment. - -Note that archived WAL files are normally deleted automatically after a -short while when there is no follower attached that may read from the archive. -However, in case when there are followers attached that may read from the -archive, WAL files normally remain in the archive until their contents have -been streamed to the followers. In case there are slow followers that cannot -catch up this will cause a growth of the WAL files archive over time. 
- -The option `--rocksdb.wal-archive-size-limit` can now be used to force a -deletion of WAL files from the archive even if there are followers attached -that may want to read the archive. In case the option is set and a leader -deletes files from the archive that followers want to read, this will abort -the replication on the followers. Followers can however restart the replication -doing a resync. - -`--rocksdb.max-transaction-size` - -Transaction size limit (in bytes). Transactions store all keys and values in -RAM, so large transactions run the risk of causing out-of-memory situations. -This setting allows you to ensure that does not happen by limiting the size of -any individual transaction. Transactions whose operations would consume more -RAM than this threshold value will abort automatically with error 32 ("resource -limit exceeded"). - -`--rocksdb.intermediate-commit-size` - -If the size of all operations in a transaction reaches this threshold, the -transaction is committed automatically and a new transaction is started. The -value is specified in bytes. - -`--rocksdb.intermediate-commit-count` - -If the number of operations in a transaction reaches this value, the transaction -is committed automatically and a new transaction is started. - -`--rocksdb.throttle` - -If enabled, throttles the ingest rate of writes if necessary to reduce chances -of compactions getting too far behind and blocking incoming writes. This option -is `true` by default. - -`--rocksdb.sync-interval` - -The interval (in milliseconds) that ArangoDB will use to automatically -synchronize data in RocksDB's write-ahead logs to disk. Automatic syncs will -only be performed for not-yet synchronized data, and only for operations that -have been executed without the *waitForSync* attribute. - -Note: this option is not supported on Windows platforms. Setting the option to -a value greater 0 will produce a startup warning. - -`--rocksdb.use-file-logging` - -When set to *true*, enables writing of RocksDB's own informational LOG files into -RocksDB's database directory. - -This option is turned off by default, but can be enabled for debugging RocksDB -internals and performance. - -`--rocksdb.debug-logging` - -When set to *true*, enables verbose logging of RocksDB's actions into the logfile -written by ArangoDB (if option `--rocksdb.use-file-logging` is off) or RocksDB's -own log (if option `--rocksdb.use-file-logging` is on). - -This option is turned off by default, but can be enabled for debugging RocksDB -internals and performance. diff --git a/Documentation/Books/Manual/Programs/Arangod/Server.md b/Documentation/Books/Manual/Programs/Arangod/Server.md deleted file mode 100644 index 1bb1e7e271e1..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Server.md +++ /dev/null @@ -1,276 +0,0 @@ -# ArangoDB Server _Server_ Options - -## Managing Endpoints - -The ArangoDB server can listen for incoming requests on multiple *endpoints*. - -The endpoints are normally specified either in ArangoDB's configuration file or -on the command-line like `--server.endpoint`. 
ArangoDB supports different -types of endpoints: - -- tcp://ipv4-address:port - TCP/IP endpoint, using IPv4 -- tcp://[ipv6-address]:port - TCP/IP endpoint, using IPv6 -- ssl://ipv4-address:port - TCP/IP endpoint, using IPv4, SSL encryption -- ssl://[ipv6-address]:port - TCP/IP endpoint, using IPv6, SSL encryption -- unix:///path/to/socket - Unix domain socket endpoint - -If a TCP/IP endpoint is specified without a port number, then the default port -(8529) will be used. If multiple endpoints need to be used, the option can be -repeated multiple times. - -The default endpoint for ArangoDB is *tcp://127.0.0.1:8529* or -*tcp://localhost:8529*. - -**Examples** - -``` -unix> ./arangod --server.endpoint tcp://127.0.0.1:8529 - --server.endpoint ssl://127.0.0.1:8530 - --ssl.keyfile server.pem /tmp/vocbase -2019-05-06T07:30:42Z [9228] INFO ArangoDB 3.4.5 [linux] 64bit, using jemalloc, build tags/v3.4.5-0-g648fbb8191, VPack 0.1.33, RocksDB 5.16.0, ICU 58.1, V8 5.7.492.77, OpenSSL 1.1.0j 20 Nov 2018 -2019-05-06T07:30:43Z [9228] INFO {authentication} Jwt secret not specified, generating... -2019-05-06T07:30:43Z [9228] INFO using storage engine rocksdb -2019-05-06T07:30:43Z [9228] INFO {cluster} Starting up with role SINGLE -2019-05-06T07:50:53Z [9228] INFO {syscall} file-descriptors (nofiles) hard limit is 1048576, soft limit is 1048576 -2019-05-06T07:50:53Z [9228] INFO {authentication} Authentication is turned on (system only), authentication for unix sockets is turned on -2019-05-06T07:30:43Z [9228] INFO using endpoint 'http+tcp://127.0.0.1:8529' for non-encrypted requests -2019-05-06T07:30:43Z [9228] INFO using endpoint 'http+ssl://127.0.0.1:8530' for ssl-encrypted requests -2019-05-06T07:30:44Z [9228] INFO ArangoDB (version 3.4.5 [linux]) is ready for business. Have fun! -``` - -Given a hostname: - -`--server.endpoint tcp://hostname:port` - -Given an IPv4 address: - -`--server.endpoint tcp://ipv4-address:port` - -Given an IPv6 address: - -`--server.endpoint tcp://[ipv6-address]:port` - -On one specific ethernet interface each port can only be bound **exactly -once**. You can look up your available interfaces using the *ifconfig* command -on Linux / macOS - the Windows equivalent is *ipconfig* (see -[Wikipedia for more details](http://en.wikipedia.org/wiki/Ifconfig)). -The general names of the interfaces differ on OS's and hardwares they run on. -However, typically every host has a so called -[loopback interface](http://en.wikipedia.org/wiki/Loop_device), -which is a virtual interface. By convention it always has the address -*127.0.0.1* or *::1* (ipv6), and can only be reached from exactly the very same -host. Ethernet interfaces usually have names like *eth0*, *wlan0*, *eth1:17*, -*le0* or a plain text name in Windows. - -To find out which services already use ports (so ArangoDB can't bind them -anymore), you can use the -[netstat command](http://en.wikipedia.org/wiki/Netstat) -(it behaves a little different on each platform, run it with *-lnpt* on Linux, -*-p tcp* on macOS or with *-an* on windows for valuable information). - -ArangoDB can also do a so called *broadcast bind* using -*tcp://0.0.0.0:8529*. This way it will be reachable on all interfaces of the -host. This may be useful on development systems that frequently change their -network setup like laptops. - -### Special note on IPv6 link-local addresses - -ArangoDB can also listen to IPv6 link-local addresses via adding the zone ID -to the IPv6 address in the form `[ipv6-link-local-address%zone-id]`. 
However, -what you probably instead want is to bind to a local IPv6 address. Local IPv6 -addresses start with `fd`. If you only see a `fe80:` IPv6 address in your -interface configuration but no IPv6 address starting with `fd` your interface -has no local IPv6 address assigned. You can read more about IPv6 link-local -addresses [here](https://en.wikipedia.org/wiki/Link-local_address#IPv6). - -**Example** - -Bind to a link-local and local IPv6 address. - - unix> ifconfig - -This command lists all interfaces and assigned ip addresses. The link-local -address may be `fe80::6257:18ff:fe82:3ec6%eth0` (IPv6 address plus interface name). -A local IPv6 address may be `fd12:3456::789a`. To bind ArangoDB to it start -*arangod* with `--server.endpoint tcp://[fe80::6257:18ff:fe82:3ec6%eth0]:8529`. -Use telnet to test the connection. - - unix> telnet fe80::6257:18ff:fe82:3ec6%eth0 8529 - Trying fe80::6257:18ff:fe82:3ec6... - Connected to my-machine. - Escape character is '^]'. - GET / HTTP/1.1 - - HTTP/1.1 301 Moved Permanently - Location: /_db/_system/_admin/aardvark/index.html - Content-Type: text/html - Server: ArangoDB - Connection: Keep-Alive - Content-Length: 197 - - Moved

This page has moved to /_db/_system/_admin/aardvark/index.html.

- -### Reuse address - -`--tcp.reuse-address` - -If this boolean option is set to *true* then the socket option SO_REUSEADDR is -set on all server endpoints, which is the default. If this option is set to -*false* it is possible that it takes up to a minute after a server has -terminated until it is possible for a new server to use the same endpoint -again. This is why this is activated by default. - -Please note however that under some operating systems this can be a security -risk because it might be possible for another process to bind to the same -address and port, possibly hijacking network traffic. Under Windows, ArangoDB -additionally sets the flag SO_EXCLUSIVEADDRUSE as a measure to alleviate this -problem. - -### Backlog size - -`--tcp.backlog-size` - -Allows to specify the size of the backlog for the *listen* system call The -default value is 10. The maximum value is platform-dependent. Specifying a -higher value than defined in the system header's SOMAXCONN may result in a -warning on server start. The actual value used by *listen* may also be silently -truncated on some platforms (this happens inside the *listen* system call). - -## Maximal queue size - -Maximum size of the queue for requests: `--server.maximal-queue-size -size` - -Specifies the maximum *size* of the queue for asynchronous task -execution. If the queue already contains *size* tasks, new tasks will -be rejected until other tasks are popped from the queue. Setting this -value may help preventing from running out of memory if the queue is -filled up faster than the server can process requests. - -## Storage engine - -ArangoDB's "traditional" storage engine is called `MMFiles`, which also was the -default storage engine up to including ArangoDB 3.3. - -Since ArangoDB 3.2, an alternative engine based on [RocksDB](http://rocksdb.org) -is also provided and could be turned on manually. Since ArangoDB 3.4, the RocksDB -storage engine is the default storage engine for new installations. - -One storage engine type is supported per server per installation. -Live switching of storage engines on already installed systems isn't supported. -Configuring the wrong engine (not matching the previously used one) will result -in the server refusing to start. You may however use `auto` to let ArangoDB choose -the previously used one. - -`--server.storage-engine [auto|mmfiles|rocksdb]` - -Note that `auto` will default to `rocksdb` starting with ArangoDB 3.4, but in -previous versions it defaulted to `mmfiles`. - -## Check max memory mappings - -`--server.check-max-memory-mappings` can be used on Linux to make arangod -check the number of memory mappings currently used by the process (as reported in -`/proc//maps`) and compare it with the maximum number of allowed mappings as -determined by */proc/sys/vm/max_map_count*. If the current number of memory -mappings gets near the maximum allowed value, arangod will log a warning -and disallow the creation of further V8 contexts temporarily until the current -number of mappings goes down again. - -If the option is set to false, no such checks will be performed. All non-Linux -operating systems do not provide this option and will ignore it. - -## Enable/disable authentication - -@startDocuBlock server_authentication - -## JWT Secret - -`--server.jwt-secret secret` - -ArangoDB will use JWTs to authenticate requests. Using this option let's -you specify a JWT. When specified, the JWT secret must be at most 64 bytes -long. 
- -In single server setups and when not specifying this secret ArangoDB will -generate a secret. - -In cluster deployments which have authentication enabled a secret must -be set consistently across all cluster nodes so they can talk to each other. - -## Enable/disable authentication for UNIX domain sockets - -`--server.authentication-unix-sockets value` - -Setting *value* to true will turn off authentication on the server side -for requests coming in via UNIX domain sockets. With this flag enabled, -clients located on the same host as the ArangoDB server can use UNIX domain -sockets to connect to the server without authentication. -Requests coming in by other means (e.g. TCP/IP) are not affected by this option. - -The default value is *false*. - -**Note**: this option is only available on platforms that support UNIX -domain sockets. - -## Enable/disable authentication for system API requests only - -@startDocuBlock serverAuthenticateSystemOnly - -## Enable authentication cache timeout - -`--server.authentication-timeout value` - -Sets the cache timeout to *value* (in seconds). This is only necessary -if you use an external authentication system like LDAP. - -## Enable local authentication - -`--server.local-authentication value` - -If set to *false* only use the external authentication system. If -*true* also use the local *_users* collections. - -The default value is *true*. - -## Server threads - -`--server.minimal-threads number` - -`--server.maximal-threads number` - -Specifies the *number* of threads that are spawned to handle requests. - -The actual number of request processing threads is adjusted dynamically at runtime -and will float between `--server.minimal-threads` and `--server.maximal-threads`. - -`--server.minimal-threads` determines the minimum number of request processing -threads the server will start and that will always be kept around. The default -value is *2*. - -`--server.maximal-threads` determines the maximum number of request processing -threads the server is allowed to start for request handling. If that number of -threads is already running, arangod will not start further threads for request -handling. The default value is - -## Toggling server statistics - -`--server.statistics value` - -If this option is *value* is *false*, then ArangoDB's statistics gathering -is turned off. Statistics gathering causes regular background CPU activity and -memory usage, so using this option to turn statistics off might relieve heavily-loaded -instances a bit. - -## Data source flush synchronization - -`--server.flush-interval` - -ArangoDB will periodically ensure that all data sources (databases, views, etc.) -have flushed all committed data to disk and write some checkpoint data to aid in -future recovery. Increasing this value will result in fewer, larger write -batches, while decreasing it will result in more, smaller writes. Setting the -value too low can easily overwhelm the server, while setting the value too high -may result in high memory usage and periodic slowdowns. Value is given in -microseconds, with a typical range of 100000 (100ms) to 10000000 (10s) and a -default of 1000000 (1s). Use caution when changing from the default. 
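
To tie several of the options from this chapter together, here is a minimal configuration-file sketch; all values are illustrative and need to be adapted to the deployment:

```
[server]
endpoint = tcp://0.0.0.0:8529
storage-engine = auto
authentication = true
minimal-threads = 2
maximal-threads = 64
statistics = true
```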
diff --git a/Documentation/Books/Manual/Programs/Arangod/Ssl.md b/Documentation/Books/Manual/Programs/Arangod/Ssl.md deleted file mode 100644 index 3ec69563b09e..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Ssl.md +++ /dev/null @@ -1,171 +0,0 @@ -# ArangoDB Server SSL Options - -## SSL Endpoints - -Given a hostname: - -`--server.endpoint ssl://hostname:port` - -Given an IPv4 address: - -`--server.endpoint ssl://ipv4-address:port` - -Given an IPv6 address: - -`--server.endpoint ssl://[ipv6-address]:port` - -**Note**: If you are using SSL-encrypted endpoints, you must also supply the -path to a server certificate using the `--ssl.keyfile` option. - -### Keyfile - -`--ssl.keyfile filename` - -If SSL encryption is used, this option must be used to specify the filename of -the server private key. The file must be PEM formatted and contain both the -certificate and the server's private key. - -The file specified by *filename* can be generated using OpenSSL: - -``` -# create private key in file "server.key" -openssl genpkey -out server.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -aes-128-cbc - -# create certificate signing request (csr) in file "server.csr" -openssl req -new -key server.key -out server.csr - -# copy away original private key to "server.key.org" -cp server.key server.key.org - -# remove passphrase from the private key -openssl rsa -in server.key.org -out server.key - -# sign the csr with the key, creates certificate PEM file "server.crt" -openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt - -# combine certificate and key into single PEM file "server.pem" -cat server.crt server.key > server.pem -``` - -You may use certificates issued by a Certificate Authority or self-signed -certificates. Self-signed certificates can be created by a tool of your -choice. When using OpenSSL for creating the self-signed certificate, the -following commands should create a valid keyfile: - -``` ------BEGIN CERTIFICATE----- - -(base64 encoded certificate) - ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- - -(base64 encoded private key) - ------END RSA PRIVATE KEY----- -``` - -For further information please check the manuals of the tools you use to create -the certificate. - -### CA File - -`--ssl.cafile filename` - -This option can be used to specify a file with CA certificates that are sent to -the client whenever the server requests a client certificate. If the file is -specified, The server will only accept client requests with certificates issued -by these CAs. Do not specify this option if you want clients to be able to -connect without specific certificates. - -The certificates in *filename* must be PEM formatted. - -### SSL protocol - -`--ssl.protocol value` - -Use this option to specify the default encryption protocol to be used. The -following variants are available: - -- 1: SSLv2 (unsupported) -- 2: SSLv2 or SSLv3 (negotiated) -- 3: SSLv3 -- 4: TLSv1 -- 5: TLSv1.2 - -The default *value* is 5 (TLSv1.2). - -Note that SSLv2 is unsupported as of ArangoDB 3.4, because of the inherent -security vulnerabilities in this protocol. Selecting SSLv2 as protocol will -abort the startup. - -### SSL cache - -`--ssl.session-cache value` - -Set to true if SSL session caching should be used. - -*value* has a default value of *false* (i.e. no caching). 
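
A combined sketch of starting the server with an SSL endpoint using the options described above (the port, the keyfile path, and the data directory are placeholders):

```
arangod --server.endpoint ssl://0.0.0.0:8530 \
        --ssl.keyfile /etc/arangodb3/server.pem \
        --ssl.protocol 5 \
        --ssl.session-cache true \
        /path/to/datadir
```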
- -### SSL peer certificate - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -`--ssl.require-peer-certificate` - -Require a peer certificate from the client before connecting. - -### SSL options - -`--ssl.options value` - -This option can be used to set various SSL-related options. Individual option -values must be combined using bitwise OR. - -Which options are available on your platform is determined by the OpenSSL -version you use. The list of options available on your platform might be -retrieved by the following shell command: - -``` - > grep "#define SSL_OP_.*" /usr/include/openssl/ssl.h - - #define SSL_OP_MICROSOFT_SESS_ID_BUG 0x00000001L - #define SSL_OP_NETSCAPE_CHALLENGE_BUG 0x00000002L - #define SSL_OP_LEGACY_SERVER_CONNECT 0x00000004L - #define SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG 0x00000008L - #define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG 0x00000010L - #define SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER 0x00000020L - ... -``` - -A description of the options can be found online in the -[OpenSSL documentation](http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html) - -### SSL cipher - -`--ssl.cipher-list cipher-list` - -This option can be used to restrict the server to certain SSL ciphers only, and -to define the relative usage preference of SSL ciphers. - -The format of *cipher-list* is documented in the OpenSSL documentation. - -To check which ciphers are available on your platform, you may use the -following shell command: - -``` -> openssl ciphers -v - -ECDHE-RSA-AES256-SHA SSLv3 Kx=ECDH Au=RSA Enc=AES(256) Mac=SHA1 -ECDHE-ECDSA-AES256-SHA SSLv3 Kx=ECDH Au=ECDSA Enc=AES(256) Mac=SHA1 -DHE-RSA-AES256-SHA SSLv3 Kx=DH Au=RSA Enc=AES(256) Mac=SHA1 -DHE-DSS-AES256-SHA SSLv3 Kx=DH Au=DSS Enc=AES(256) Mac=SHA1 -DHE-RSA-CAMELLIA256-SHA SSLv3 Kx=DH Au=RSA Enc=Camellia(256) -Mac=SHA1 -... -``` - -The default value for *cipher-list* is "ALL". diff --git a/Documentation/Books/Manual/Programs/Arangod/Tcp.md b/Documentation/Books/Manual/Programs/Arangod/Tcp.md deleted file mode 100644 index 5c276a9e3e0d..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Tcp.md +++ /dev/null @@ -1,13 +0,0 @@ -# ArangoDB Server TCP Options - -## Backlog Size - -`tcp.backlog-size` - -Listen backlog size. - -## Re-use address - -`tcp.reuse-address` - -Try to reuse TCP port(s). diff --git a/Documentation/Books/Manual/Programs/Arangod/Temp.md b/Documentation/Books/Manual/Programs/Arangod/Temp.md deleted file mode 100644 index 4d0c832ba147..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Temp.md +++ /dev/null @@ -1,7 +0,0 @@ -# ArangoDB Server Temp Options - -## Path - -`temp.path` - -Path for temporary files. diff --git a/Documentation/Books/Manual/Programs/Arangod/Ttl.md b/Documentation/Books/Manual/Programs/Arangod/Ttl.md deleted file mode 100644 index eb255c54b388..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Ttl.md +++ /dev/null @@ -1,45 +0,0 @@ -# TTL (time-to-live) Options - -## TTL background thread frequency - -`--ttl.frequency` - -The frequency for invoking the TTL background removal thread. The value for this -option is specified in milliseconds. -The lower this value, the more frequently the TTL background thread will kick in -and scan all available TTL indexes for expired documents, and the earlier the -expired documents will actually be removed. 
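
For example, to make the background thread run more often while capping how much it removes per run, the frequency option can be combined with the removal limits described in the following sections. A sketch with illustrative values:

```
arangod --ttl.frequency 15000 \
        --ttl.max-total-removes 100000 \
        --ttl.max-collection-removes 10000 \
        /path/to/datadir
```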
- -## TTL maximum total removals - -`--ttl.max-total-removes` - -In order to avoid "random" load spikes by the background thread suddenly kicking -in and removing a lot of documents at once, the number of to-be-removed documents -per thread invocation can be capped. - -The TTL background thread will go back to sleep once it has removed the configured -number of documents in one iteration. If more candidate documents are left for -removal, they will be removed in following runs of the background thread. - -## TTL maximum per-collection removals - -`--ttl.max-collection-removes` - -This option controls the maximum number of documents to be removed per collection -in each background thread run. This value can be configured separately from the -total removal amount so that the per-collection time window for locking and potential -write-write conflicts can be reduced. - -## TTL only for loaded collections - -`--ttl.only-loaded-collection` - -This option will only scan TTL indexes of collections that are already loaded into -memory. Setting the option to false will make the background thread ignore collections -that are currently not loaded. This saves the background thread from loading all -collections into memory. - -This option is useful for the MMFiles engine only, where collections need to be -loaded into memory first. For the RocksDB engine, collections do not need to be loaded -into memory first, so this option does not make any difference. diff --git a/Documentation/Books/Manual/Programs/Arangod/Vst.md b/Documentation/Books/Manual/Programs/Arangod/Vst.md deleted file mode 100644 index 5e608147309e..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Vst.md +++ /dev/null @@ -1,9 +0,0 @@ -# ArangoDB Server VST Options - -VST is an acronym for [VelocyStream](https://github.com/arangodb/velocystream). - -## Maxsize - -`vst.maxsize` - -Maximal size (in bytes) for a VelocyPack chunk. diff --git a/Documentation/Books/Manual/Programs/Arangod/Wal.md b/Documentation/Books/Manual/Programs/Arangod/Wal.md deleted file mode 100644 index c3a08a0e30b2..000000000000 --- a/Documentation/Books/Manual/Programs/Arangod/Wal.md +++ /dev/null @@ -1,183 +0,0 @@ -# ArangoDB Server WAL Options (MMFiles) - -WAL is an acronym for write-ahead log. - -The write-ahead log is a sequence of logfiles that are written in an append-only -fashion. Full logfiles will eventually be garbage-collected, and the relevant data -might be transferred into collection journals and datafiles. Unneeded and already -garbage-collected logfiles will either be deleted or kept for the purpose of keeping -a replication backlog. - -Since ArangoDB 2.2, the MMFiles storage engine will write all data-modification -operations into its write-ahead log. - -With ArangoDB 3.2 another storage engine option becomes available: -[RocksDB](../../Architecture/StorageEngines.md#rocksdb). -In case of using RocksDB the subsequent options don't have a useful meaning. - -## Directory - - - -The WAL logfiles directory: `--wal.directory` - -Specifies the directory in which the write-ahead logfiles should be -stored. If this option is not specified, it defaults to the subdirectory -*journals* in the server's global database directory. If the directory is -not present, it will be created. 
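
For example, to place the MMFiles write-ahead log on a separate disk, the directory can be set in the configuration file (the path is a placeholder):

```
[wal]
directory = /mnt/fast-disk/arangodb3/journals
```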
- -## Logfile size - - - -@startDocuBlock WalLogfileSize - -## Allow oversize entries - - - -@startDocuBlock WalLogfileAllowOversizeEntries - -## Number of reserve logfiles - - - -@startDocuBlock WalLogfileReserveLogfiles - -## Number of historic logfiles - - - -@startDocuBlock WalLogfileHistoricLogfiles - -## Sync interval - - - -@startDocuBlock WalLogfileSyncInterval - -## Flush timeout - - - -@startDocuBlock WalLogfileFlushTimeout - -## Throttling - - - -Throttle writes to WAL when at least such many operations are -waiting for garbage collection: -`--wal.throttle-when-pending` - -The maximum value for the number of write-ahead log garbage-collection -queue elements. If set to *0*, the queue size is unbounded, and no -write-throttling will occur. If set to a non-zero value, write-throttling -will automatically kick in when the garbage-collection queue contains at -least as many elements as specified by this option. -While write-throttling is active, data-modification operations will -intentionally be delayed by a configurable amount of time. This is to -ensure the write-ahead log garbage collector can catch up with the -operations executed. -Write-throttling will stay active until the garbage-collection queue size -goes down below the specified value. -Write-throttling is turned off by default. - -`--wal.throttle-wait` - -This option determines the maximum wait time (in milliseconds) for -operations that are write-throttled. If write-throttling is active and a -new write operation is to be executed, it will wait for at most the -specified amount of time for the write-ahead log garbage-collection queue -size to fall below the throttling threshold. If the queue size decreases -before the maximum wait time is over, the operation will be executed -normally. If the queue size does not decrease before the wait time is -over, the operation will be aborted with an error. -This option only has an effect if `--wal.throttle-when-pending` has a -non-zero value, which is not the default. - -## Number of slots - - - -Maximum number of slots to be used in parallel: -`--wal.slots` - -Configures the amount of write slots the write-ahead log can give to write -operations in parallel. Any write operation will lease a slot and return -it to the write-ahead log when it is finished writing the data. A slot will -remain blocked until the data in it was synchronized to disk. After that, -a slot becomes reusable by following operations. The required number of -slots is thus determined by the parallelism of write operations and the -disk synchronization speed. Slow disks probably need higher values, and -fast disks may only require a value lower than the default. - -## Ignore logfile errors - - - -Ignore logfile errors when opening logfiles: -`--wal.ignore-logfile-errors` - -Ignores any recovery errors caused by corrupted logfiles on startup. When -set to *false*, the recovery procedure on startup will fail with an error -whenever it encounters a corrupted (that includes only half-written) -logfile. This is a security precaution to prevent data loss in case of disk -errors etc. When the recovery procedure aborts because of corruption, any -corrupted files can be inspected and fixed (or removed) manually and the -server can be restarted afterwards. - -Setting the option to *true* will make the server continue with the recovery -procedure even in case it detects corrupt logfile entries. In this case it -will stop at the first corrupted logfile entry and ignore all others, which -might cause data loss. 
- -## Ignore recovery errors - - - -Ignore recovery errors: -`--wal.ignore-recovery-errors` - -Ignores any recovery errors not caused by corrupted logfiles but by logical -errors. Logical errors can occur if logfiles or any other server datafiles -have been manually edited or the server is somehow misconfigured. - -## Ignore (non-WAL) datafile errors - - - -Ignore datafile errors when loading collections: -`--database.ignore-datafile-errors boolean` - -If set to `false`, CRC mismatch and other errors in collection datafiles -will lead to a collection not being loaded at all. The collection in this -case becomes unavailable. If such collection needs to be loaded during WAL -recovery, the WAL recovery will also abort (if not forced with option -`--wal.ignore-recovery-errors true`). - -Setting this flag to `false` protects users from unintentionally using a -collection with corrupted datafiles, from which only a subset of the -original data can be recovered. Working with such collection could lead -to data loss and follow up errors. -In order to access such collection, it is required to inspect and repair -the collection datafile with the datafile debugger (arango-dfdb). - -If set to `true`, CRC mismatch and other errors during the loading of a -collection will lead to the datafile being partially loaded, up to the -position of the first error. All data up to until the invalid position -will be loaded. This will enable users to continue with collection -datafiles -even if they are corrupted, but this will result in only a partial load -of the original data and potential follow up errors. The WAL recovery -will still abort when encountering a collection with a corrupted datafile, -at least if `--wal.ignore-recovery-errors` is not set to `true`. - -Setting the option to `true` will also automatically repair potentially -corrupted VERSION files of databases on startup, so that the startup can -proceed. - -The default value is *false*, so collections with corrupted datafiles will -not be loaded at all, preventing partial loads and follow up errors. However, -if such collection is required at server startup, during WAL recovery, the -server will abort the recovery and refuse to start. diff --git a/Documentation/Books/Manual/Programs/Arangodump/Examples.md b/Documentation/Books/Manual/Programs/Arangodump/Examples.md deleted file mode 100644 index 7ef9ccc33a8d..000000000000 --- a/Documentation/Books/Manual/Programs/Arangodump/Examples.md +++ /dev/null @@ -1,220 +0,0 @@ -Arangodump Examples -=================== - -_arangodump_ can be invoked in a command line by executing the following command: - - arangodump --output-directory "dump" - -This will connect to an ArangoDB server and dump all non-system collections from -the default database (*_system*) into an output directory named *dump*. -Invoking _arangodump_ will fail if the output directory already exists. This is -an intentional security measure to prevent you from accidentally overwriting already -dumped data. If you are positive that you want to overwrite data in the output -directory, you can use the parameter *--overwrite true* to confirm this: - - arangodump --output-directory "dump" --overwrite true - -_arangodump_ will by default connect to the *_system* database using the default -endpoint. 
To override the endpoint, or specify a different user, use one of the -following startup options: - -- `--server.endpoint `: endpoint to connect to -- `--server.username `: username -- `--server.password `: password to use (omit this and you'll be prompted for the - password) -- `--server.authentication `: whether or not to use authentication - -If you want to connect to a different database or dump all databases you can additionaly -use the following startup options: - -- `--all-databases true`: must have access to all databases, and not specify a database. -- `--server.database `: name of the database to connect to - -Note that the specified user must have access to the databases. - -Here's an example of dumping data from a non-standard endpoint, using a dedicated -[database name](../../Appendix/Glossary.md#database-name): - - arangodump --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --output-directory "dump" - -In contrast to the above call `--server.database` must not be specified when dumping -all databases using `--all-databases true`: - - arangodump --server.endpoint tcp://192.168.173.13:8531 --server.username backup --all-databases true --output-directory "dump-multiple" - -When finished, _arangodump_ will print out a summary line with some aggregate -statistics about what it did, e.g.: - - Processed 43 collection(s), wrote 408173500 byte(s) into datafiles, sent 88 batch(es) - -By default, _arangodump_ will dump both structural information and documents from all -non-system collections. To adjust this, there are the following command-line -arguments: - -- `--dump-data `: set to *true* to include documents in the dump. Set to *false* - to exclude documents. The default value is *true*. -- `--include-system-collections `: whether or not to include system collections - in the dump. The default value is *false*. **Set to _true_ if you are using named - graphs that you are interested in restoring.** - -For example, to only dump structural information of all collections (including system -collections), use: - - arangodump --dump-data false --include-system-collections true --output-directory "dump" - -To restrict the dump to just specific collections, there is is the *--collection* option. -It can be specified multiple times if required: - - arangodump --collection myusers --collection myvalues --output-directory "dump" - -Structural information for a collection will be saved in files with name pattern -*.structure.json*. Each structure file will contains a JSON object -with these attributes: -- *parameters*: contains the collection properties -- *indexes*: contains the collection indexes - -Document data for a collection will be saved in files with name pattern -*.data.json*. Each line in a data file is a document insertion/update or -deletion marker, alongside with some meta data. - -Cluster Backup --------------- - -Starting with Version 2.1 of ArangoDB, the *arangodump* tool also -supports sharding and can be used to backup data from a Cluster. -Simply point it to one of the _Coordinators_ and it -will behave exactly as described above, working on sharded collections -in the Cluster. - -Please see the [Limitations](Limitations.md). - -As above, the output will be one structure description file and one data -file per sharded collection. Note that the data in the data file is -sorted first by shards and within each shard by ascending timestamp. The -structural information of the collection contains the number of shards -and the shard keys. 
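
For example, a dump of a sharded database taken via one of the Coordinators could look like this (endpoint, username, and database name are placeholders):

```
arangodump --server.endpoint tcp://coordinator1.example.com:8529 \
           --server.username backup \
           --server.database mydb \
           --output-directory "cluster-dump"
```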
- -Note that the version of the arangodump client tool needs to match the -version of the ArangoDB server it connects to. - -### Advanced Cluster Options - -Starting with version 3.1.17, collections may be [created with shard -distribution](../../DataModeling/Collections/DatabaseMethods.md#create) -identical to an existing prototypical collection; i.e. shards are distributed in -the very same pattern as in the prototype collection. Such collections cannot be -dumped without the referenced collection or arangodump yields an error. - - arangodump --collection clonedCollection --output-directory "dump" - - ERROR Collection clonedCollection's shard distribution is based on a that of collection prototypeCollection, which is not dumped along. You may dump the collection regardless of the missing prototype collection by using the --ignore-distribute-shards-like-errors parameter. - -There are two ways to approach that problem. -Dump the prototype collection as well: - - arangodump --collection clonedCollection --collection prototypeCollection --output-directory "dump" - - Processed 2 collection(s), wrote 81920 byte(s) into datafiles, sent 1 batch(es) - -Or override that behavior to be able to dump the collection in isolation -individually: - - arangodump --collection clonedCollection --output-directory "dump" --ignore-distribute-shards-like-errors - - Processed 1 collection(s), wrote 34217 byte(s) into datafiles, sent 1 batch(es) - -Note that in consequence, restoring such a collection without its prototype is -affected. See documentation on [arangorestore](../Arangorestore/README.md) for -more details about restoring the collection. - -Encryption ----------- - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -Starting from version 3.3 encryption of the dump is supported. - -The dump is encrypted using an encryption keyfile, which must contain exactly 32 -bytes of data (required by the AES block cipher). - -The keyfile can be created by an external program, or, on Linux, by using a command -like the following: - -``` -dd if=/dev/random bs=1 count=32 of=yourSecretKeyFile -``` - -For security reasons, it is best to create these keys offline (away from your -database servers) and directly store them in your secret management -tool. - - -In order to create an encrypted backup, add the `--encryption.keyfile` -option when invoking _arangodump_, in addition to any other option you -are already using. The following example assumes that your secret key -is stored in ~/SECRET-KEY: - -``` -arangodump --collection "secret-collection" dump --encryption.keyfile ~/SECRET-KEY -``` - -Note that _arangodump_ will not store the key anywhere. It is the responsibility -of the user to find a safe place for the key. However, _arangodump_ will store -the used encryption method in a file named `ENCRYPTION` in the dump directory. -That way _arangorestore_ can later find out whether it is dealing with an -encrypted dump or not. 
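Since possession of the keyfile is all that is needed to decrypt the dump, it is
advisable to also restrict filesystem access to it. A minimal sketch on Linux,
assuming the key is stored in ~/SECRET-KEY as in the example above:

```
# make the keyfile readable only by the current user, then create the encrypted dump
chmod 600 ~/SECRET-KEY
arangodump --collection "secret-collection" dump --encryption.keyfile ~/SECRET-KEY
```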
- -Trying to restore the encrypted dump without specifying the key will fail: - -``` -arangorestore --collection "secret-collection" dump --create-collection true -``` - -and _arangorestore_ will report the following error: - -``` -the dump data seems to be encrypted with aes-256-ctr, but no key information was specified to decrypt the dump -it is recommended to specify either `--encryption.keyfile` or `--encryption.key-generator` when invoking arangorestore with an encrypted dump -``` - -It is required to use the exact same key when restoring the data. Again this is -done by providing the `--encryption.keyfile` parameter: - -``` -arangorestore --collection "secret-collection" dump --create-collection true --encryption.keyfile ~/SECRET-KEY -``` - -Using a different key will lead to the backup being non-recoverable. - -Note that encrypted backups can be used together with the already existing -RocksDB encryption-at-rest feature, but they can also be used for the MMFiles -engine, which does not have encryption-at-rest. - -Compression ------------ - -Introduced in: v3.4.6, v3.5.0 - -`--compress-output` - -Data can optionally be dumped in a compressed format to save space on disk. -The `--compress-output` option can not be used together with [Encryption](#encryption). - -If compression is enabled, no `.data.json` files are written. Instead, the -collection data gets compressed using the Gzip algorithm and for each collection -a `.data.json.gz` file is written. Metadata files such as `.structure.json` and -`.view.json` do not get compressed. - -``` -arangodump --output-directory "dump" --compress-output -``` - -Compressed dumps can be restored with *arangorestore*, which automatically -detects whether the data is compressed or not based on the file extension. - -``` -arangorestore --input-directory "dump" -``` diff --git a/Documentation/Books/Manual/Programs/Arangodump/Limitations.md b/Documentation/Books/Manual/Programs/Arangodump/Limitations.md deleted file mode 100644 index 85e4ac72662a..000000000000 --- a/Documentation/Books/Manual/Programs/Arangodump/Limitations.md +++ /dev/null @@ -1,16 +0,0 @@ -Arangodump Limitations -====================== - -_Arangodump_ has the following limitations: - -- In a Cluster, _arangodump_ does not guarantee to dump a consistent snapshot if write - operations happen while the dump is in progress. It is therefore recommended not to - perform any data-modification operations on the cluster while _arangodump_ - is running. This is in contrast to what happens on a single instance, a master/slave, - or active failover setup, where even if write operations are ongoing, the created dump - is consistent, as a snapshot is taken when the dump starts. -- If the MMFiles engine is in use, on a single instance, a master/slave, or active failover - setup, even if the write operations are suspended, it is not guaranteed that the dump includes - all the data that has been previously written as _arangodump_ will only dump the data - included in the _datafiles_ but not the data that has not been transferred from the _WAL_ - to the _datafiles_. A WAL flush can be forced as documented in the [WAL flush](../../Appendix/JavaScriptModules/WAL.md#flushing) section. 
\ No newline at end of file diff --git a/Documentation/Books/Manual/Programs/Arangodump/Maskings.md b/Documentation/Books/Manual/Programs/Arangodump/Maskings.md deleted file mode 100644 index c6d0c6226a6e..000000000000 --- a/Documentation/Books/Manual/Programs/Arangodump/Maskings.md +++ /dev/null @@ -1,742 +0,0 @@ -Arangodump Data Maskings -======================== - -*--maskings path-of-config* - -This feature allows you to define how sensitive data shall be dumped. -It is possible to exclude collections entirely, limit the dump to the -structural information of a collection (name, indexes, sharding etc.) -or to obfuscate certain fields for a dump. A JSON configuration file is -used to define which collections and fields to mask and how. - -The general structure of the configuration file looks like this: - -```json -{ - "collection-name": { - "type": MASKING_TYPE, - "maskings": [ - MASKING1, - MASKING2, - ... - ] - }, - ... -} -``` - -At the top level, there is an object with collection names and the masking -settings to be applied to them. Using `"*"` as collection name defines a -default behavior for collections not listed explicitly. - -Masking Types -------------- - -`type` is a string describing how to mask the given collection. -Possible values are: - -- `"exclude"`: the collection is ignored completely and not even the - structure data is dumped. - -- `"structure"`: only the collection structure is dumped, but no data at all - -- `"masked"`: the collection structure and all data is dumped. However, the data - is subject to obfuscation defined in the attribute `maskings`. It is an array - of objects, with one object per field to mask. Each object needs at least a - `path` and a `type` attribute to [define which field to mask](#path) and which - [masking function](#masking-functions) to apply. Depending on the - masking type, there may exist additional attributes. - -- `"full"`: the collection structure and all data is dumped. No masking is - applied to this collection at all. - -**Example** - -```json -{ - "private": { - "type": "exclude" - }, - - "log": { - "type": "structure" - }, - - "person": { - "type": "masked", - "maskings": [ - { - "path": "name", - "type": "xifyFront", - "unmaskedLength": 2 - }, - { - "path": ".security_id", - "type": "xifyFront", - "unmaskedLength": 2 - } - ] - } -} -``` - -- The collection called _private_ is completely ignored. -- Only the structure of the collection _log_ is dumped, but not the data itself. -- The collection _person_ is dumped completely but with maskings applied: - - The _name_ field is masked if it occurs on the top-level. - - It also masks fields with the name _security_id_ anywhere in the document. - - The masking function is of type [_xifyFront_](#xify-front) in both cases. - The additional setting `unmaskedLength` is specific so _xifyFront_. - -### Masking vs. dump-data option - -*arangodump* also supports a very coarse masking with the option -`--dump-data false`. This basically removes all data from the dump. - -You can either use `--masking` or `--dump-data false`, but not both. - -### Masking vs. include-collection option - -*arangodump* also supports a very coarse masking with the option -`--include-collection`. This will restrict the collections that are -dumped to the ones explicitly listed. - -It is possible to combine `--masking` and `--include-collection`. -This will take the intersection of exportable collections. - -Path ----- - -`path` defines which field to obfuscate. 
There can only be a single -path per masking, but an unlimited amount of maskings per collection. - -Note that the top-level system attributes like `_key`, `_from` are -never masked. - -To mask a top-level attribute value, the path is simply the attribute -name, for instance `"name"` to mask the value `"foobar"`: - -```json -{ - "_key": "1234", - "name": "foobar" -} -``` - -The path to a nested attribute `name` with a top-level attribute `person` -as its parent is `"person.name"`: - -```json -{ - "_key": "1234", - "person": { - "name": "foobar" - } -} -``` - -If the path starts with a `.` then it matches any path ending in `name`. -For example, `.name` will match the field `name` of all leaf attributes -in the document. Leaf attributes are attributes whose value is `null`, -`true`, `false`, or of data type `string`, `number` or `array`. -That means, it matches `name` at the top level -as well as at any nested level (e.g. `foo.bar.name`), but not nested -objects themselves. - -On the other hand, `name` will only match leaf attributes -at top level. `person.name` will match the attribute `name` of a leaf -in the top-level object `person`. If `person` was itself an object, -then the masking settings for this path would be ignored, because it -is not a leaf attribute. - -If the attribute value is an **array** then the masking is applied to -**all array elements individually**. - -If you have an attribute name that contains a dot, you need to quote the -name with either a tick or a backtick. For example: - - "path": "´name.with.dots´" - -or - - "path": "`name.with.dots`" - -**Example** - -The following configuration will replace the value of the `name` -attribute with an "xxxx"-masked string: - -```json -{ - "type": "xifyFront", - "path": ".name", - "unmaskedLength": 2 -} -``` - -The document: - -```json -{ - "name": "top-level-name", - "age": 42, - "nicknames" : [ { "name": "hugo" }, "egon" ], - "other": { - "name": [ "emil", { "secret": "superman" } ] - } -} -``` - -… will be changed as follows: - -```json -{ - "name": "xxxxxxxxxxxxme", - "age": 42, - "nicknames" : [ { "name": "xxgo" }, "egon" ], - "other": { - "name": [ "xxil", { "secret": "superman" } ] - } -} -``` - -The values `"egon"` and `"superman"` are not replaced, because they -are not contained in an attribute value of which the attribute name is -`name`. - -### Nested objects and arrays - -If you specify a path and the attribute value is an array then the -masking decision is applied to each element of the array as if this -was the value of the attribute. This applies to arrays inside the array too. - -If the attribute value is an object, then it is ignored and the attribute -does not get masked. To mask nested fields, specify the full path for each -leaf attribute. - -{% hint 'tip' %} -If some documents have an attribute `email` with a string as value, but other -documents store a nested object under the same attribute name, then make sure -to set up proper masking for the latter case, in which sub-attributes will not -get masked if there is only a masking configured for the attribute `email` -but not its nested attributes. -{% endhint %} - -**Examples** - -Masking `email` with the _Xify Front_ function will convert: - -```json -{ - "email" : "email address" -} -``` - -… into: - -```json -{ - "email" : "xxil xxxxxxss" -} -``` - -because `email` is a leaf attribute. 
The document: - -```json -{ - "email" : [ - "address one", - "address two", - [ - "address three" - ] - ] -} -``` - -… will be converted into: - -```json -{ - "email" : [ - "xxxxxss xne", - "xxxxxss xwo", - [ - "xxxxxss xxxee" - ] - ] -} -``` - -… because the masking is applied to each array element individually -including the elements of the sub-array. The document: - -```json -{ - "email" : { - "address" : "email address" - } -} -``` - -… will not be changed because `email` is not a leaf attribute. -To mask the email address, you could use the paths `email.address` -or `.address`. - -### Match all - -If the path is `"*"` then this match any leaf attribute. - -Masking Functions ------------------ - -{% hint 'info' %} -The following masking functions are only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/). -{% endhint %} - -- [Xify Front](#xify-front) -- [Zip](#zip) -- [Datetime](#datetime) -- [Integral Number](#integral-number) -- [Decimal Number](#decimal-number) -- [Credit Card Number](#credit-card-number) -- [Phone Number](#phone-number) -- [Email Address](#email-address) - -The masking functions: - -- [Random String](#random-string) -- [Random](#random) - -… are available in the Community Edition as well as the Enterprise Edition. - -### Random String - -This masking type will replace all values of attributes whose values are strings -with key `name` with an anonymized string. It is not guaranteed that the -string will be of the same length. Attribute whose values are not strings -are not modified. - -A hash of the original string is computed. If the original string is -shorter then the hash will be used. This will result in a longer -replacement string. If the string is longer than the hash then -characters will be repeated as many times as needed to reach the full -original string length. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"randomString"` - -**Example** - -```json -{ - "path": ".name", - "type": "randomString" -} -``` - -Above masking setting applies to all leaf attributes with name `.name`. -A document like: - -```json -{ - "_key" : "1234", - "name" : [ - "My Name", - { - "other" : "Hallo Name" - }, - [ - "Name One", - "Name Two" - ], - true, - false, - null, - 1.0, - 1234, - "This is a very long name" - ], - "deeply": { - "nested": { - "name": "John Doe", - "not-a-name": "Pizza" - } - } -} -``` - -… will be converted to: - -```json -{ - "_key": "1234", - "name": [ - "+y5OQiYmp/o=", - { - "other": "Hallo Name" - }, - [ - "ihCTrlsKKdk=", - "yo/55hfla0U=" - ], - true, - false, - null, - 1.0, - 1234, - "hwjAfNe5BGw=hwjAfNe5BGw=" - ], - "deeply": { - "nested": { - "name": "55fHctEM/wY=", - "not-a-name": "Pizza" - } - } -} -``` - -### Random - -This masking type works like random string for attributes with string -values. Values Attributes with integer, decimal or boolean values are -replaced by random integers, decimals or boolean. - -### Xify Front - -This masking type replaces the front characters with `x` and -blanks. Alphanumeric characters, `_` and `-` are replaced by `x`, -everything else is replaced by a blank. 
- -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"xifyFront"` -- `unmaskedLength` (number, _default: `2`_): how many characters to - leave as-is on the right-hand side of each word as integer value -- `hash` (bool, _default: `false`_): whether to append a hash value to the - masked string to avoid possible unique constraint violations caused by - the obfuscation -- `seed` (integer, _default: `0`_): used as secret for computing the hash. - A value of `0` means a random seed - -**Examples** - -```json -{ - "path": ".name", - "type": "xifyFront", - "unmaskedLength": 2 -} -``` - -This will mask all alphanumeric characters of a word except the last -two characters. Words of length 1 and 2 are unmasked. If the -attribute value is not a string the result will be `xxxx`. - - "This is a test!Do you agree?" - -… will become: - - "xxis is a xxst Do xou xxxee " - -There is a catch. If you have an index on the attribute the masking -might distort the index efficiency or even cause errors in case of a -unique index. - -```json -{ - "type": "xifyFront", - "path": ".name", - "unmaskedLength": 2, - "hash": true -} -``` - -This will add a hash at the end of the string. - - "This is a test!Do you agree?" - -… will become - - "xxis is a xxst Do xou xxxee NAATm8c9hVQ=" - -Note that the hash is based on a random secret that is different for -each run. This avoids dictionary attacks which can be used to guess -values based pre-computations on dictionaries. - -If you need reproducible results, i.e. hashes that do not change between -different runs of *arangodump*, you need to specify a secret as seed, -a number which must not be `0`. - -```json -{ - "type": "xifyFront", - "path": ".name", - "unmaskedLength": 2, - "hash": true, - "seed": 246781478647 -} -``` - -### Zip - -This masking type replaces a zip code with a random one. -It uses the following rules: - -- If a character of the original zip code is a digit it will be replaced - by a random digit. -- If a character of the original zip code is a letter it - will be replaced by a random letter keeping the case. -- If the attribute value is not a string then the default value is used. - -Note that this will generate random zip codes. Therefore there is a -chance that the same zip code value is generated multiple times, which can -cause unique constraint violations if a unique index is or will be -used on the zip code attribute. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"zip"` -- `default` (string, _default: `"12345"`_): if the input field is not of - data type `string`, then this value is used - -**Examples** - -```json -{ - "path": ".code", - "type": "zip", -} -``` - -This replaces real zip codes stored in fields called `code` at any level -with random ones. `"12345"` is used as fallback value. - -```json -{ - "path": ".code", - "type": "zip", - "default": "abcdef" -} -``` - -If the original zip code is: - - 50674 - -… it will be replaced by e.g.: - - 98146 - -If the original zip code is: - - SA34-EA - -… it will be replaced by e.g.: - - OW91-JI - -If the original zip code is `null`, `true`, `false` or a number, then the -user-defined default value of `"abcdef"` will be used. - -### Datetime - -This masking type replaces the value of the attribute with a random -date between two configured dates in a customizable format. 
- -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"datetime"` -- `begin` (string, _default: `"1970-01-01T00:00:00.000"`_): - earliest point in time to return. Date time string in ISO 8601 format. -- `end` (string, _default: now_): - latest point in time to return. Date time string in ISO 8601 format. - In case a partial date time string is provided (e.g. `2010-06` without day - and time) the earliest date and time is assumed (`2010-06-01T00:00:00.000`). - The default value is the current system date and time. -- `format` (string, _default: `""`_): the formatting string format is - described in [DATE_FORMAT()](../../../AQL/Functions/Date.html#dateformat). - If no format is specified, then the result will be an empty string. - -**Example** - -```json -{ - "path": "eventDate", - "type": "datetime", - "begin" : "2019-01-01", - "end": "2019-12-31", - "format": "%yyyy-%mm-%dd", -} -``` - -Above example masks the field `eventDate` by returning a random date time -string in the range of January 1st and December 31st in 2019 using a format -like `2019-06-17`. - -### Integral Number - -This masking type replaces the value of the attribute with a random -integral number. It will replace the value even if it is a string, -Boolean, or `null`. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"integer"` -- `lower` (number, _default: `-100`_): smallest integer value to return -- `upper` (number, _default: `100`_): largest integer value to return - -**Example** - -```json -{ - "path": "count", - "type": "integer", - "lower" : -100, - "upper": 100 -} -``` - -This masks the field `count` with a random number between --100 and 100 (inclusive). - -### Decimal Number - -This masking type replaces the value of the attribute with a random -floating point number. It will replace the value even if it is a string, -Boolean, or `null`. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"decimal"` -- `lower` (number, _default: `-1`_): smallest floating point value to return -- `upper` (number, _default: `1`_): largest floating point value to return -- `scale` (number, _default: `2`_): maximal amount of digits in the - decimal fraction part - -**Examples** - -```json -{ - "path": "rating", - "type": "decimal", - "lower" : -0.3, - "upper": 0.3 -} -``` - -This masks the field `rating` with a random floating point number between --0.3 and +0.3 (inclusive). By default, the decimal has a scale of 2. -That means, it has at most 2 digits after the dot. - -The configuration: - -```json -{ - "path": "rating", - "type": "decimal", - "lower" : -0.3, - "upper": 0.3, - "scale": 3 -} -``` - -… will generate numbers with at most 3 decimal digits. - -### Credit Card Number - -This masking type replaces the value of the attribute with a random -credit card number (as integer number). -See [Luhn algorithm](https://en.wikipedia.org/wiki/Luhn_algorithm) -for details. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"creditCard"` - -**Example** - -```json -{ - "path": "ccNumber", - "type": "creditCard" -} -``` - -This generates a random credit card number to mask field `ccNumber`, -e.g. `4111111414443302`. - -### Phone Number - -This masking type replaces a phone number with a random one. -It uses the following rule: - -- If a character of the original number is a digit - it will be replaced by a random digit. 
-- If it is a letter it is replaced by a random letter. -- All other characters are left unchanged. -- If the attribute value is not a string it is replaced by the - default value. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"phone"` -- `default` (string, _default: `"+1234567890"`_): if the input field - is not of data type `string`, then this value is used - -**Examples** - -```json -{ - "path": "phone.landline", - "type": "phone" -} -``` - -This will replace an existing phone number with a random one, for instance -`"+31 66-77-88-xx"` might get substituted by `"+75 10-79-52-sb"`. - -```json -{ - "path": "phone.landline", - "type": "phone", - "default": "+49 12345 123456789" -} -``` - -This masks a phone number as before, but falls back to a different default -phone number in case the input value is not a string. - -### Email Address - -This masking type takes an email address, computes a hash value and -splits it into three equal parts `AAAA`, `BBBB`, and `CCCC`. The -resulting email address is in the format `AAAA.BBBB@CCCC.invalid`. -The hash is based on a random secret that is different for each run. - -Masking settings: - -- `path` (string): which field to mask -- `type` (string): masking function name `"email"` - -**Example** - -```json -{ - "path": ".email", - "type": "email" -} -``` - -This masks every leaf attribute `email` with a random email address -similar to `"EHwG.3AOg@hGU=.invalid"`. diff --git a/Documentation/Books/Manual/Programs/Arangodump/Options.md b/Documentation/Books/Manual/Programs/Arangodump/Options.md deleted file mode 100644 index c1828bacc827..000000000000 --- a/Documentation/Books/Manual/Programs/Arangodump/Options.md +++ /dev/null @@ -1,32 +0,0 @@ -Arangodump Options -================== - -Usage: `arangodump []` - -@startDocuBlock program_options_arangodump - -Notes ------ - -### Encryption Option Details - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -*--encryption.keyfile path-of-keyfile* - -The file `path-to-keyfile` must contain the encryption key. This -file must be secured, so that only `arangodump` or `arangorestore` can access it. -You should also ensure that in case someone steals your hardware, they will not be -able to read the file. For example, by encrypting `/mytmpfs` or -creating an in-memory file-system under `/mytmpfs`. The encryption keyfile must -contain 32 bytes of data. - -*--encryption.key-generator path-to-my-generator* - -This output is used if you want to use the program to generate your encryption key. -The program `path-to-my-generator` must output the encryption on standard output -and exit. The encryption keyfile must contain 32 bytes of data. - diff --git a/Documentation/Books/Manual/Programs/Arangodump/README.md b/Documentation/Books/Manual/Programs/Arangodump/README.md deleted file mode 100644 index a7c13d4624ca..000000000000 --- a/Documentation/Books/Manual/Programs/Arangodump/README.md +++ /dev/null @@ -1,17 +0,0 @@ -Arangodump -========== - -_Arangodump_ is a command-line client tool to create backups of the data and -structures stored in ArangoDB. - -Dumps are meant to be restored with [_Arangorestore_](../Arangorestore/README.md). - -If you want to export for external programs to formats like JSON or CSV, see -[_Arangoexport_](../Arangoexport/README.md) instead. 
- -_Arangodump_ can be used for all ArangoDB deployments modes (Single Instance, -Master/Slave, Active Failover, Cluster and DC2DC) and it can backup selected collections -or all collections of a database, optionally including _system_ collections. One -can backup the structure, i.e. the collections with their configuration without -any data, only the data stored in them, or both. If you are using the Enterprise -Edition, dumps can optionally be encrypted. diff --git a/Documentation/Books/Manual/Programs/Arangoexport/Examples.md b/Documentation/Books/Manual/Programs/Arangoexport/Examples.md deleted file mode 100644 index f1b0f9a41ca1..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoexport/Examples.md +++ /dev/null @@ -1,143 +0,0 @@ -Arangoexport Examples -===================== - -_arangoexport_ can be invoked by executing the following command in a command line: - - arangoexport --collection test --output-directory "dump" - -This exports the collections *test* into the directory *dump* as one big json array. Every entry -in this array is one document from the collection without a specific order. To export more than -one collection at a time specify multiple *--collection* options. - -The default output directory is *export*. - -_arangoexport_ will by default connect to the *_system* database using the default -endpoint. If you want to connect to a different database or a different endpoint, -or use authentication, you can use the following command-line options: - -- *--server.database *: name of the database to connect to -- *--server.endpoint *: endpoint to connect to -- *--server.username *: username -- *--server.password *: password to use (omit this and you'll be prompted for the - password) -- *--server.authentication *: whether or not to use authentication - -Here's an example of exporting data from a non-standard endpoint, using a dedicated -[database name](../../Appendix/Glossary.md#database-name): - - arangoexport --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --collection test --output-directory "my-export" - -When finished, _arangoexport_ will print out a summary line with some aggregate -statistics about what it did, e.g.: - - Processed 2 collection(s), wrote 9031763 Byte(s), 78 HTTP request(s) - - -Export JSON ------------ - - arangoexport --type json --collection test - -This exports the collection *test* into the output directory *export* as one json array. -Every array entry is one document from the collection *test* - -Export JSONL ------------- - - arangoexport --type jsonl --collection test - -This exports the collection *test* into the output directory *export* as [JSONL](http://jsonlines.org). -Every line in the export is one document from the collection *test* as JSON. - -Export CSV ----------- - - arangoexport --type csv --collection test --fields _key,_id,_rev - -This exports the collection *test* into the output directory *export* as CSV. The first -line contains the header with all field names. Each line is one document represented as -CSV and separated with a comma. Objects and arrays are represented as a JSON string. - - -Export XML ----------- - - arangoexport --type xml --collection test - -This exports the collection *test* into the output directory *export* as generic XML. -The root element of the generated XML file is named *collection*. -Each document in the collection is exported in a *doc* XML attribute. 
-Each document attribute is exported as a generic *att* element, which has a -*name* attribute with the attribute name, a *type* attribute indicating the -attribute value type, and a *value* attribute containing the attribute's value. - -Export XGMML ------------- - -[XGMML](https://en.wikipedia.org/wiki/XGMML) is an XML application -based on [GML](https://en.wikipedia.org/wiki/Graph_Modelling_Language). -To view the XGMML file you can use for example [Cytoscape](http://cytoscape.org). - - -{% hint 'warning' %} -If you export all attributes (*--xgmml-label-only false*) note that attribute types have to be the same for all documents. It wont work if you have an attribute named rank that is in one document a string and in another document an integer. - -Bad - - { "rank": 1 } // doc1 - { "rank": "2" } // doc2 - -Good - - { "rank": 1 } // doc1 - { "rank": 2 } // doc2 - -{% endhint %} - -**XGMML specific options** - -*--xgmml-label-attribute* specify the name of the attribute that will become the label in the xgmml file. - -*--xgmml-label-only* set to true will only export the label without any attributes in edges or nodes. - - -**Export based on collections** - - arangoexport --type xgmml --graph-name mygraph --collection vertex --collection edge - -This exports an unnamed graph with vertex collection *vertex* and edge collection *edge* into the xgmml file *mygraph.xgmml*. - - -**Export based on a named graph** - - arangoexport --type xgmml --graph-name mygraph - -This exports the named graph mygraph into the xgmml file *mygraph.xgmml*. - - -**Export XGMML without attributes** - - arangoexport --type xgmml --graph-name mygraph --xgmml-label-only true - -This exports the named graph mygraph into the xgmml file *mygraph.xgmml* without the *<att>* tag in nodes and edges. - - -**Export XGMML with a specific label** - - arangoexport --type xgmml --graph-name mygraph --xgmml-label-attribute name - -This exports the named graph mygraph into the xgmml file *mygraph.xgmml* with a label from documents attribute *name* instead of the default attribute *label*. - -Export via AQL query --------------------- - - arangoexport --type jsonl --query "FOR book IN books FILTER book.sells > 100 RETURN book" - -Export via an AQL query allows you to export the returned data as the type specified with *--type*. -The example exports all books as JSONL that are sold more than 100 times. - - arangoexport --type csv --fields title,category1,category2 --query "FOR book IN books RETURN { title: book.title, category1: book.categories[0], category2: book.categories[1] }" - -A *fields* list is required for CSV exports, but you can use an AQL query to produce -these fields. For example, you can de-normalize document structures like arrays and -nested objects to a tabular form as demonstrated above. 
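The export options shown above can also be combined. As a sketch, the query-based
CSV export can be written to a non-default output directory; the collection,
fields and filter are the ones from the examples above:

    arangoexport --type csv --fields title,category1,category2 --output-directory "book-export" --query "FOR book IN books FILTER book.sells > 100 RETURN { title: book.title, category1: book.categories[0], category2: book.categories[1] }"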
diff --git a/Documentation/Books/Manual/Programs/Arangoexport/Options.md b/Documentation/Books/Manual/Programs/Arangoexport/Options.md deleted file mode 100644 index 86788cca98a1..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoexport/Options.md +++ /dev/null @@ -1,6 +0,0 @@ -Arangoexport Options -==================== - -Usage: `arangoexport []` - -@startDocuBlock program_options_arangoexport diff --git a/Documentation/Books/Manual/Programs/Arangoexport/README.md b/Documentation/Books/Manual/Programs/Arangoexport/README.md deleted file mode 100644 index e21ed32cd0c9..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoexport/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Arangoexport -============ - -_Arangoexport_ is a command-line client tool to export data from -[ArangoDB servers](../Arangod/README.md) to formats like JSON, CSV or XML for -consumption by third-party tools. - -If you want to create backups, see [_Arangodump_](../Arangodump/README.md) -instead. diff --git a/Documentation/Books/Manual/Programs/Arangoimport/Details.md b/Documentation/Books/Manual/Programs/Arangoimport/Details.md deleted file mode 100644 index 4e6ed18f81d6..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoimport/Details.md +++ /dev/null @@ -1,183 +0,0 @@ -Arangoimport Details -==================== - -The most convenient method to import a lot of data into ArangoDB is to use the -*arangoimport* command-line tool. It allows you to bulk import data records -from a file into a database collection. Multiple files can be imported into -the same or different collections by invoking it multiple times. - -Importing into an Edge Collection ---------------------------------- - -Arangoimport can also be used to import data into an existing edge collection. -The import data must, for each edge to import, contain at least the *_from* and -*_to* attributes. These indicate which other two documents the edge should connect. -It is necessary that these attributes are set for all records, and point to -valid document IDs in existing collections. - -*Example* - -```js -{ "_from" : "users/1234", "_to" : "users/4321", "desc" : "1234 is connected to 4321" } -``` - -**Note**: The edge collection must already exist when the import is started. Using -the *--create-collection* flag will not work because arangoimport will always try to -create a regular document collection if the target collection does not exist. - -Attribute Naming and Special Attributes ---------------------------------------- - -Attributes whose names start with an underscore are treated in a special way by -ArangoDB: - -- the optional *_key* attribute contains the document's key. If specified, the value - must be formally valid (e.g. must be a string and conform to the naming conventions). - Additionally, the key value must be unique within the - collection the import is run for. -- *_from*: when importing into an edge collection, this attribute contains the id - of one of the documents connected by the edge. The value of *_from* must be a - syntactically valid document id and the referred collection must exist. -- *_to*: when importing into an edge collection, this attribute contains the id - of the other document connected by the edge. The value of *_to* must be a - syntactically valid document id and the referred collection must exist. -- *_rev*: this attribute contains the revision number of a document. However, the - revision numbers are managed by ArangoDB and cannot be specified on import. 
Thus - any value in this attribute is ignored on import. - -If you import values into *_key*, you should make sure they are valid and unique. - -When importing data into an edge collection, you should make sure that all import -documents can *_from* and *_to* and that their values point to existing documents. - -To avoid specifying complete document ids (consisting of collection names and document -keys) for *_from* and *_to* values, there are the options *--from-collection-prefix* and -*--to-collection-prefix*. If specified, these values will be automatically prepended -to each value in *_from* (or *_to* resp.). This allows specifying only document keys -inside *_from* and/or *_to*. - -*Example* - - arangoimport --from-collection-prefix users --to-collection-prefix products ... - -Importing the following document will then create an edge between *users/1234* and -*products/4321*: - -```js -{ "_from" : "1234", "_to" : "4321", "desc" : "users/1234 is connected to products/4321" } -``` - -Updating existing documents ---------------------------- - -By default, arangoimport will try to insert all documents from the import file into the -specified collection. In case the import file contains documents that are already present -in the target collection (matching is done via the *_key* attributes), then a default -arangoimport run will not import these documents and complain about unique key constraint -violations. - -However, arangoimport can be used to update or replace existing documents in case they -already exist in the target collection. It provides the command-line option *--on-duplicate* -to control the behavior in case a document is already present in the database. - -The default value of *--on-duplicate* is *error*. This means that when the import file -contains a document that is present in the target collection already, then trying to -re-insert a document with the same *_key* value is considered an error, and the document in -the database will not be modified. - -Other possible values for *--on-duplicate* are: - -- *update*: each document present in the import file that is also present in the target - collection already will be updated by arangoimport. *update* will perform a partial update - of the existing document, modifying only the attributes that are present in the import - file and leaving all other attributes untouched. - - The values of system attributes *_id*, *_key*, *_rev*, *_from* and *_to* cannot be - updated or replaced in existing documents. - -- *replace*: each document present in the import file that is also present in the target - collection already will be replace by arangoimport. *replace* will replace the existing - document entirely, resulting in a document with only the attributes specified in the import - file. - - The values of system attributes *_id*, *_key*, *_rev*, *_from* and *_to* cannot be - updated or replaced in existing documents. - -- *ignore*: each document present in the import file that is also present in the target - collection already will be ignored and not modified in the target collection. - -When *--on-duplicate* is set to either *update* or *replace*, arangoimport will return the -number of documents updated/replaced in the *updated* return value. When set to another -value, the value of *updated* will always be zero. When *--on-duplicate* is set to *ignore*, -arangoimport will return the number of ignored documents in the *ignored* return value. -When set to another value, *ignored* will always be zero. 
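As a sketch, an import that updates already existing documents (matched via their
*_key* values) instead of reporting unique constraint violations could look like
this; the file name and collection are placeholders:

    arangoimport --file "data.jsonl" --type jsonl --collection users --on-duplicate update

Using *--on-duplicate replace* with the same invocation would replace the stored
documents entirely instead of partially updating them.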
- -It is possible to perform a combination of inserts and updates/replaces with a single -arangoimport run. When *--on-duplicate* is set to *update* or *replace*, all documents present -in the import file will be inserted into the target collection provided they are valid -and do not already exist with the specified *_key*. Documents that are already present -in the target collection (identified by *_key* attribute) will instead be updated/replaced. - -Result output -------------- - -An _arangoimport_ import run will print out the final results on the command line. -It will show the - -- number of documents created (*created*) -- number of documents updated/replaced (*updated/replaced*, only non-zero if - *--on-duplicate* was set to *update* or *replace*, see below) -- number of warnings or errors that occurred on the server side (*warnings/errors*) -- number of ignored documents (only non-zero if *--on-duplicate* was set to *ignore*). - -*Example* - -```js -created: 2 -warnings/errors: 0 -updated/replaced: 0 -ignored: 0 -``` - -For CSV and TSV imports, the total number of input file lines read will also be printed -(*lines read*). - -_arangoimport_ will also print out details about warnings and errors that happened on the -server-side (if any). - -### Automatic pacing with busy or low throughput disk subsystems - -Arangoimport has an automatic pacing algorithm that limits how fast -data is sent to the ArangoDB servers. This pacing algorithm exists to -prevent the import operation from failing due to slow responses. - -Google Compute and other VM providers limit the throughput of disk -devices. Google's limit is more strict for smaller disk rentals, than -for larger. Specifically, a user could choose the smallest disk space -and be limited to 3 Mbytes per second. Similarly, other users' -processes on the shared VM can limit available throughput of the disk -devices. - -The automatic pacing algorithm adjusts the transmit block size -dynamically based upon the actual throughput of the server over the -last 20 seconds. Further, each thread delivers its portion of the data -in mostly non-overlapping chunks. The thread timing creates -intentional windows of non-import activity to allow the server extra -time for meta operations. - -Automatic pacing intentionally does not use the full throughput of a -disk device. An unlimited (really fast) disk device might not need -pacing. Raising the number of threads via the `--threads X` command -line to any value of `X` greater than 2 will increase the total -throughput used. - -Automatic pacing frees the user from adjusting the throughput used to -match available resources. It is disabled by manually specifying any -`--batch-size`. 16777216 was the previous default for *--batch-size*. -Having *--batch-size* too large can lead to transmitted data piling-up -on the server, resulting in a TimeoutError. - -The pacing algorithm works successfully with MMFiles with disks -limited to read and write throughput as small as 1 Mbyte per -second. The algorithm works successfully with RocksDB with disks -limited to read and write throughput as small as 3 Mbyte per second. 
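A short sketch of the two modes described above, keeping automatic pacing while
raising the thread count versus disabling pacing by fixing a batch size; file
name and collection are placeholders:

    # automatic pacing remains active, additional threads increase the throughput used
    arangoimport --threads 4 --file "data.jsonl" --type jsonl --collection users

    # specifying any --batch-size disables automatic pacing (16777216 was the previous default)
    arangoimport --batch-size 16777216 --file "data.jsonl" --type jsonl --collection users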
diff --git a/Documentation/Books/Manual/Programs/Arangoimport/ExamplesCsv.md b/Documentation/Books/Manual/Programs/Arangoimport/ExamplesCsv.md deleted file mode 100644 index 4ae24d12548e..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoimport/ExamplesCsv.md +++ /dev/null @@ -1,161 +0,0 @@ -Arangoimport Examples: CSV / TSV -================================ - -Importing CSV Data ------------------- - -_arangoimport_ offers the possibility to import data from CSV files. This -comes handy when the data at hand is in CSV format already and you don't want to -spend time converting them to JSON for the import. - -To import data from a CSV file, make sure your file contains the attribute names -in the first row. All the following lines in the file will be interpreted as -data records and will be imported. - -The CSV import requires the data to have a homogeneous structure. All records -must have exactly the same amount of columns as there are headers. By default, -lines with a different number of values will not be imported and there will be -warnings for them. To still import lines with less values than in the header, -there is the *--ignore-missing* option. If set to true, lines that have a -different amount of fields will be imported. In this case only those attributes -will be populated for which there are values. Attributes for which there are -no values present will silently be discarded. - -Example: - -``` -"first","last","age","active","dob" -"John","Connor",25,true -"Jim","O'Brady" -``` - -With *--ignore-missing* this will produce the following documents: - -```js -{ "first" : "John", "last" : "Connor", "active" : true, "age" : 25 } -{ "first" : "Jim", "last" : "O'Brady" } -``` - -The cell values can have different data types though. If a cell does not have -any value, it can be left empty in the file. These values will not be imported -so the attributes will not "be there" in document created. Values enclosed in -quotes will be imported as strings, so to import numeric values, boolean values -or the null value, don't enclose the value in quotes in your file. - -We'll be using the following import for the CSV import: - -``` -"first","last","age","active","dob" -"John","Connor",25,true, -"Jim","O'Brady",19,, -"Lisa","Jones",,,"1981-04-09" -Hans,dos Santos,0123,, -Wayne,Brewer,,false, -``` - -The command line to execute the import is: - - arangoimport --file "data.csv" --type csv --collection "users" - -The above data will be imported into 5 documents which will look as follows: - -```js -{ "first" : "John", "last" : "Connor", "active" : true, "age" : 25 } -{ "first" : "Jim", "last" : "O'Brady", "age" : 19 } -{ "first" : "Lisa", "last" : "Jones", "dob" : "1981-04-09" } -{ "first" : "Hans", "last" : "dos Santos", "age" : 123 } -{ "first" : "Wayne", "last" : "Brewer", "active" : false } -``` - -As can be seen, values left completely empty in the input file will be treated -as absent. Numeric values not enclosed in quotes will be treated as numbers. -Note that leading zeros in numeric values will be removed. To import numbers -with leading zeros, please use strings. -The literals *true* and *false* will be treated as booleans if they are not -enclosed in quotes. Other values not enclosed in quotes will be treated as -strings. -Any values enclosed in quotes will be treated as strings, too. - -String values containing the quote character or the separator must be enclosed -with quote characters. 
Within a string, the quote character itself must be -escaped with another quote character (or with a backslash if the *--backslash-escape* -option is used). - -Note that the quote and separator characters can be adjusted via the -*--quote* and *--separator* arguments when invoking _arangoimport_. The quote -character defaults to the double quote (*"*). To use a literal quote in a -string, you can use two quote characters. -To use backslash for escaping quote characters, please set the option -*--backslash-escape* to *true*. - -The importer supports Windows (CRLF) and Unix (LF) line breaks. Line breaks might -also occur inside values that are enclosed with the quote character. - -Here's an example for using literal quotes and newlines inside values: - -``` -"name","password" -"Foo","r4ndom""123!" -"Bar","wow! -this is a -multine password!" -"Bartholomew ""Bart"" Simpson","Milhouse" -``` - -Extra whitespace at the end of each line will be ignored. Whitespace at the -start of lines or between field values will not be ignored, so please make sure -that there is no extra whitespace in front of values or between them. - -Importing TSV Data ------------------- - -You may also import tab-separated values (TSV) from a file. This format is very -simple: every line in the file represents a data record. There is no quoting or -escaping. That also means that the separator character (which defaults to the -tabstop symbol) must not be used anywhere in the actual data. - -As with CSV, the first line in the TSV file must contain the attribute names, -and all lines must have an identical number of values. - -If a different separator character or string should be used, it can be specified -with the *--separator* argument. - -An example command line to execute the TSV import is: - - arangoimport --file "data.tsv" --type tsv --collection "users" - -Attribute Name Translation --------------------------- - -For the CSV and TSV input formats, attribute names can be translated automatically. -This is useful in case the import file has different attribute names than those -that should be used in ArangoDB. - -A common use case is to rename an "id" column from the input file into "_key" as -it is expected by ArangoDB. To do this, specify the following translation when -invoking arangoimport: - - arangoimport --file "data.csv" --type csv --translate "id=_key" - -Other common cases are to rename columns in the input file to *_from* and *_to*: - - arangoimport --file "data.csv" --type csv --translate "from=_from" --translate "to=_to" - -The *translate* option can be specified multiple types. The source attribute name -and the target attribute must be separated with a *=*. - -Ignoring Attributes -------------------- - -For the CSV and TSV input formats, certain attribute names can be ignored on imports. -In an ArangoDB cluster there are cases where this can come in handy, -when your documents already contain a `_key` attribute -and your collection has a sharding attribute other than `_key`: In the cluster this -configuration is not supported, because ArangoDB needs to guarantee the uniqueness of the `_key` -attribute in *all* shards of the collection. 
- - arangoimport --file "data.csv" --type csv --remove-attribute "_key" - -The same thing would apply if your data contains an *_id* attribute: - - arangoimport --file "data.csv" --type csv --remove-attribute "_id" diff --git a/Documentation/Books/Manual/Programs/Arangoimport/ExamplesJson.md b/Documentation/Books/Manual/Programs/Arangoimport/ExamplesJson.md deleted file mode 100644 index f78b3656a6f3..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoimport/ExamplesJson.md +++ /dev/null @@ -1,218 +0,0 @@ -Arangoimport Examples: JSON -=========================== - -Using JSON as data format, records are represented as JSON objects and called -documents in ArangoDB. They are self-contained. Therefore, there is no need -for all records in a collection to have the same attribute names or types. -Documents can be inhomogeneous while data types can be fully preserved. - -Input file formats ------------------- - -*arangoimport* supports two formats when importing JSON data: - -- [JSON](http://json.org/) – JavaScript Object Notation -- [JSON Lines](http://jsonlines.org/) – - also known as _JSONL_ or new-line delimited JSON - -Multiple documents can be stored in standard JSON format in a top-level array -with objects as members: - -```js -[ - { "_key": "one", "value": 1 }, - { "_key": "two", "value": 2 }, - { "_key": "foo", "value": "bar" }, - ... -] -``` - -This format allows line breaks for formatting (i.e. pretty printing): - -```js -[ - { - "_key": "one", - "value": 1 - }, - { - "_key": "two", - "value": 2 - }, - { - "_key": "foo", - "value": "bar" - }, - ... -] -``` - -It requires parsers to read the entire input in order to verify that the -array is properly closed at the very end. _arangoimport_ will need to read -the whole input before it can send the first batch to the server. -By default, it will allow importing such files up to a size of about 16 MB. -If you want to allow your _arangoimport_ instance to use more memory, increase -the maximum file size by specifying the command-line option `--batch-size`. -For example, to set the batch size to 32 MB, use the following command: - - arangoimport --file "data.json" --type json --collection "users" --batch-size 33554432 - -_JSON Lines_ formatted data allows processing each line individually: - -```js -{ "_key": "one", "value": 1 } -{ "_key": "two", "value": 2 } -{ "_key": "foo", "value": "bar" } -... -``` - -The above format can be imported sequentially by _arangoimport_. It will read -data from the input in chunks and send it in batches to the server. Each batch -will be about as big as specified in the command-line parameter `--batch-size`. - -Please note that you may still need to increase the value of `--batch-size` if a -single document inside the input file is bigger than the value of `--batch-size`. - -_JSON Lines_ does not allow line breaks for pretty printing. There has to be one -complete JSON object on each line. A JSON array or primitive value per line is -not supported by _arangoimport_ in contrast to the JSON Lines specification, -which allows any valid JSON value on a line. - -Converting JSON to JSON Lines ------------------------------ - -An input with JSON objects in an array, optionally pretty printed, can be -easily converted into JSONL with one JSON object per line using the -[**jq** command line tool](http://stedolan.github.io/jq/): - -``` -jq -c ".[]" inputFile.json > outputFile.jsonl -``` - -The `-c` option enables compact JSON (as opposed to pretty printed JSON). 
-`".[]"` is a filter that unpacks the top-level array and effectively puts each -object in that array on a separate line in combination with the compact option. - -An example `inputFile.json` can look like this: - -```json -[ - { - "isActive": true, - "name": "Evans Wheeler", - "latitude": -0.119406, - "longitude": 146.271888, - "tags": [ - "amet", - "qui", - "velit" - ] - }, - { - "isActive": true, - "name": "Coffey Barron", - "latitude": -37.78772, - "longitude": 131.218935, - "tags": [ - "dolore", - "exercitation", - "irure", - "velit" - ] - } -] -``` - -The conversion produces the following `outputFile.jsonl`: - -```json -{"isActive":true,"name":"Evans Wheeler","latitude":-0.119406,"longitude":146.271888,"tags":["amet","qui","velit"]} -{"isActive":true,"name":"Coffey Barron","latitude":-37.78772,"longitude":131.218935,"tags":["dolore","exercitation","irure","velit"]} -``` - -Import Example and Common Options ---------------------------------- - -We will be using these example user records to import: - -```js -{ "name" : { "first" : "John", "last" : "Connor" }, "active" : true, "age" : 25, "likes" : [ "swimming"] } -{ "name" : { "first" : "Jim", "last" : "O'Brady" }, "age" : 19, "likes" : [ "hiking", "singing" ] } -{ "name" : { "first" : "Lisa", "last" : "Jones" }, "dob" : "1981-04-09", "likes" : [ "running" ] } -``` - -To import these records, all you need to do is to put them into a file -(with one line for each record to import), save it as `data.jsonl` and run -the following command: - - arangoimport --file "data.jsonl" --type jsonl --collection users - -This will transfer the data to the server, import the records, and print a -status summary. - -To show the intermediate progress during the import process, the -option `--progress` can be added. This option will show the percentage of the -input file that has been sent to the server. This will only be useful for big -import files. - - arangoimport --file "data.jsonl" --type jsonl --collection users --progress true - -It is also possible to use the output of another command as an input for -_arangoimport_. For example, the following shell command can be used to pipe -data from the `cat` process to arangoimport (Linux/Cygwin only): - - cat data.json | arangoimport --file - --type jsonl --collection users - -In a command line or PowerShell on Windows, there is the `type` command: - - type data.json | arangoimport --file - --type jsonl --collection users - -The option `--file -` with a hyphen as file name is special and makes it -read from standard input. No progress can be reported for such imports as the -size of the input will be unknown to arangoimport. - -By default, the endpoint `tcp://127.0.0.1:8529` will be used. If you want to -specify a different endpoint, you can use the `--server.endpoint` option. You -probably want to specify a database user and password as well. You can do so by -using the options `--server.username` and `--server.password`. If you do not -specify a password, you will be prompted for one. - - arangoimport --server.endpoint tcp://127.0.0.1:8529 --server.username root ... - -Note that the collection (*users* in this case) must already exist or the import -will fail. If you want to create a new collection with the import data, you need -to specify the `--create-collection` option. It will create a document collection -by default and not an edge collection. 
- - arangoimport --file "data.jsonl" --type jsonl --collection users --create-collection true - -To create an edge collection instead, use the `--create-collection-type` option -and set it to *edge*: - - arangoimport --collection myedges --create-collection true --create-collection-type edge ... - -When importing data into an existing collection it is often convenient to first -remove all data from the collection and then start the import. This can be achieved -by passing the `--overwrite` parameter to _arangoimport_. If it is set to *true*, -any existing data in the collection will be removed prior to the import. Note -that any existing index definitions for the collection will be preserved even if -`--overwrite` is set to true. - - arangoimport --file "data.jsonl" --type jsonl --collection users --overwrite true - -Data gets imported into the specified collection in the default database -(*_system*). To specify a different database, use the `--server.database` -option when invoking _arangoimport_. If you want to import into a nonexistent -database you need to pass `--create-database true` to create it on-the-fly. - -The tool also supports parallel imports, with multiple threads. Using multiple -threads may provide a speedup, especially when using the RocksDB storage engine. -To specify the number of parallel threads use the `--threads` option: - - arangoimport --threads 4 --file "data.jsonl" --type jsonl --collection users - -Using multiple threads may lead to a non-sequential import of the input -data. Data that appears later in the input file may be imported earlier than data -that appears earlier in the input file. This is normally not a problem but may cause -issues when when there are data dependencies or duplicates in the import data. In -this case, the number of threads should be set to 1. diff --git a/Documentation/Books/Manual/Programs/Arangoimport/Options.md b/Documentation/Books/Manual/Programs/Arangoimport/Options.md deleted file mode 100644 index 668e52dd80d6..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoimport/Options.md +++ /dev/null @@ -1,6 +0,0 @@ -Arangoimport Options -==================== - -Usage: `arangoimport []` - -@startDocuBlock program_options_arangoimport diff --git a/Documentation/Books/Manual/Programs/Arangoimport/README.md b/Documentation/Books/Manual/Programs/Arangoimport/README.md deleted file mode 100644 index 6b4c65d1a929..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoimport/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Arangoimport -============ - -_Arangoimport_ is a command-line client tool to import data in JSON, CSV and TSV -format to [ArangoDB servers](../Arangod/README.md). - -If you want to restore backups, see [_Arangorestore_](../Arangorestore/README.md) -instead. diff --git a/Documentation/Books/Manual/Programs/Arangoinspect/Examples.md b/Documentation/Books/Manual/Programs/Arangoinspect/Examples.md deleted file mode 100644 index 2d5d88465edc..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoinspect/Examples.md +++ /dev/null @@ -1,116 +0,0 @@ -Arangoinspect Examples -====================== - -If you are asked by ArangoDB support to provide an inspector output, run -the _arangoinspect_ binary to generate a file in the current working folder. - -The resulting JSON file is a collection of meta data acquired from all -involved instances. The data includes relevant operating system parameters, -ArangoDB process parameters, local database information etc. 
- -{% hint 'warning' %} -Please open the file locally and check if it contains anything that you are -not allowed/willing to share and obfuscate it before sharing (user names, -files paths etc.). -{% endhint %} - -Invoking Arangoinspect ----------------------- - -Point the tool to an ArangoDB endpoint. In case of a single server, there -is only one. You can connect to any node in case of a cluster (_DBServer_, -_Coordinator_, _Agent_). - - arangoinspect --server.endpoint tcp://127.0.0.1:8529 - -This will start the tool with a prompt for the JWT secret and try to connect -to the specified ArangoDB server. You have to type the secret as is used for -the `arangod` option `--server.jwt-secret`. For non-cluster deployments, -you may authenticate with a user name and password instead: - - arangoinspect --server.ask-jwt-secret false --server.username "root" --server.password "foobar" - -The password can be omitted and entered interactively. - -Example outputs ---------------- - -If _arangoinspect_ succeeds to authenticate, it starts to gather information -and writes the result to `arangodb-inspector.json`, then exits: - -``` -arangoinspect --server.endpoint tcp://127.0.0.1:8629 - -Please specify the JWT secret: -Connected to ArangoDB 'http+tcp://127.0.0.1:8629' version: 3.4.devel [server], database: '_system', username: 'root' - - _ ___ _ - / \ _ __ __ _ _ __ __ _ ___ |_ _|_ __ ___ _ __ ___ ___| |_ ___ _ __ - / _ \ | '__/ _` | '_ \ / _` |/ _ \ | || '_ \/ __| '_ \ / _ \/ __| __/ _ \| '__| - / ___ \| | | (_| | | | | (_| | (_) | | || | | \__ \ |_) | __/ (__| || (_) | | -/_/ \_\_| \__,_|_| |_|\__, |\___/ |___|_| |_|___/ .__/ \___|\___|\__\___/|_| - |___/ |_| - -2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root' -2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root' -INFO changing endpoint for AGNT-01e83a4b-8a51-4919-9f50-ff640accb9fa from http+tcp://[::1]:4001 to tcp://[::1]:4001 -INFO changing endpoint for PRMR-9f5b337e-c1de-4b7d-986a-d6ad2eb8f857 from tcp://127.0.0.1:8629 to tcp://[::1]:8629 -INFO Analysing agency dump ... -INFO Plan (version 22) -INFO Databases -INFO _system -INFO Collections -INFO _system -INFO _graphs -INFO _users -INFO _modules -INFO _iresearch_analyzers -INFO _routing -INFO _aqlfunctions -INFO _frontend -INFO _queues -INFO _jobs -INFO _apps -INFO _appbundles -INFO _statisticsRaw -INFO _statistics -INFO _statistics15 -INFO Server health -INFO DB Servers -INFO PRMR-9f5b337e-c1de-4b7d-986a-d6ad2eb8f857(DBServer0001) -INFO PRMR-90ff8c20-b0f3-49c5-a5dd-7b186bb7db33(DBServer0002) -INFO Coordinators -INFO CRDN-0dbf16ec-8a06-4203-9359-447d97757b4e(Coordinator0001) -INFO Supervision activity -INFO Jobs: undefined(To do: 0, Pending: 0, Finished: 0, Failed: 0) -INFO Summary -INFO 1 databases -INFO 14 collections -INFO 14 shards -INFO ... agency analysis finished. -INFO Collecting diagnostics from all servers ... 
-2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8629', version 3.4.devel [server], database '_system', username: 'root' -2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root' -2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8630', version 3.4.devel [server], database '_system', username: 'root' -2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8530', version 3.4.devel [server], database '_system', username: 'root' -2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root' -INFO ... dignostics collected. -INFO Report written to arango-inspector.json. -``` - -If _arangoinspect_ cannot connect or authentication/authorization fails, then a fatal error -will be raised and the tool shuts down: - -``` -Could not connect to endpoint 'http+tcp://127.0.0.1:8529', database: '_system', username: 'root' -Error message: '401: Unauthorized' - - _ ___ _ - / \ _ __ __ _ _ __ __ _ ___ |_ _|_ __ ___ _ __ ___ ___| |_ ___ _ __ - / _ \ | '__/ _` | '_ \ / _` |/ _ \ | || '_ \/ __| '_ \ / _ \/ __| __/ _ \| '__| - / ___ \| | | (_| | | | | (_| | (_) | | || | | \__ \ |_) | __/ (__| || (_) | | -/_/ \_\_| \__,_|_| |_|\__, |\___/ |___|_| |_|___/ .__/ \___|\___|\__\___/|_| - |___/ |_| - -FATAL cannot connect to server 'http+tcp://127.0.0.1:8529': 401: Unauthorized -``` diff --git a/Documentation/Books/Manual/Programs/Arangoinspect/Options.md b/Documentation/Books/Manual/Programs/Arangoinspect/Options.md deleted file mode 100644 index f38867e0f1cd..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoinspect/Options.md +++ /dev/null @@ -1,6 +0,0 @@ -Arangoinspect Options -===================== - -Usage: `arangoinspect []` - -@startDocuBlock program_options_arangoinspect diff --git a/Documentation/Books/Manual/Programs/Arangoinspect/README.md b/Documentation/Books/Manual/Programs/Arangoinspect/README.md deleted file mode 100644 index cff1ab56031f..000000000000 --- a/Documentation/Books/Manual/Programs/Arangoinspect/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Arangoinspect -============= - -_Arangoinspect_ is a command-line client tool that collects information of any -ArangoDB server setup to facilitate troubleshooting for the ArangoDB support. - -The tool is available starting from ArangoDB v.3.3.11. \ No newline at end of file diff --git a/Documentation/Books/Manual/Programs/Arangorestore/Examples.md b/Documentation/Books/Manual/Programs/Arangorestore/Examples.md deleted file mode 100644 index 3c6d3f0411dc..000000000000 --- a/Documentation/Books/Manual/Programs/Arangorestore/Examples.md +++ /dev/null @@ -1,281 +0,0 @@ -Arangorestore Examples -====================== - -To restore data from a dump previously created with [_Arangodump_](../Arangodump/README.md), -ArangoDB provides the _arangorestore_ tool. - -{% hint 'danger' %} -In versions older than 3.3, _Arangorestore_ -**must not be used to create several similar database instances in one installation**. - -This means that if you have an _Arangodump_ output of database ***A***, create a second database ***B*** -on the same instance of ArangoDB, and restore the dump of ***A*** into ***B*** - data integrity can not -be guaranteed. This limitation was solved starting from ArangoDB v3.3.0. 
-{% endhint %} - -Invoking Arangorestore ----------------------- - -_arangorestore_ can be invoked from the command-line as follows: - - arangorestore --input-directory "dump" - -This will connect to an ArangoDB server (tcp://127.0.0.1:8529 by default), then restore the -collection structure and the documents from the files found in the input directory *dump*. -Note that the input directory must have been created by running *arangodump* before. - -_arangorestore_ will by default connect to the *_system* database using the default -endpoint. To override the endpoint, or specify a different user, use one of the -following startup options: - -- `--server.endpoint `: endpoint to connect to -- `--server.username `: username -- `--server.password `: password to use - (omit this and you'll be prompted for the password) -- `--server.authentication `: whether or not to use authentication - -If you want to connect to a different database or dump all databases you can additionally -use the following startup options: - -- `--server.database `: name of the database to connect to. - Defaults to the `_system` database. -- `--all-databases true`: restore multiple databases from a dump which used the same option. - Introduced in v3.5.0. - -Note that the specified user must have access to the database(s). - -Since version 2.6 _arangorestore_ provides the option *--create-database*. Setting this -option to *true* will create the target database if it does not exist. When creating the -target database, the username and passwords passed to _arangorestore_ (in options -*--server.username* and *--server.password*) will be used to create an initial user for the -new database. - -The option `--force-same-database` allows restricting arangorestore operations to a -database with the same name as in the source dump's `dump.json` file. It can thus be used -to prevent restoring data into a "wrong" database by accident. - -For example, if a dump was taken from database ***A***, and the restore is attempted into -database ***B***, then with the `--force-same-database` option set to `true`, arangorestore -will abort instantly. - -The `--force-same-database` option is set to `false` by default to ensure backwards-compatibility. - -Here's an example of reloading data to a non-standard endpoint, using a dedicated -[database name](../../Appendix/Glossary.md#database-name): - - arangorestore --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --input-directory "dump" - -To create the target database whe restoring, use a command like this: - - arangorestore --server.username backup --server.database newdb --create-database true --input-directory "dump" - -In contrast to the above calls, when working with multiple databases using `--all-databases true` -the parameter `--server.database mydb` must not be specified: - - arangorestore --server.username backup --all-databases true --create-database true --input-directory "dump-multiple" - -_arangorestore_ will print out its progress while running, and will end with a line -showing some aggregate statistics: - - Processed 2 collection(s), read 2256 byte(s) from datafiles, sent 2 batch(es) - - -By default, _arangorestore_ will re-create all non-system collections found in the input -directory and load data into them. If the target database already contains collections -which are also present in the input directory, the existing collections in the database -will be dropped and re-created with the data found in the input directory. 
-
-The following parameters are available to adjust this behavior:
-
-- `--create-collection <bool>`: set to *true* to create collections in the target
-  database. If the target database already contains a collection with the same name,
-  it will be dropped first and then re-created with the properties found in the input
-  directory. Set to *false* to keep existing collections in the target database. If
-  set to *false* and _arangorestore_ encounters a collection that is present in the
-  input directory but not in the target database, it will abort. The default value is *true*.
-- `--import-data <bool>`: set to *true* to load document data into the collections in
-  the target database. Set to *false* to not load any document data. The default value
-  is *true*.
-- `--include-system-collections <bool>`: whether or not to include system collections
-  when re-creating collections or reloading data. The default value is *false*.
-
-For example, to (re-)create all non-system collections and load document data into them, use:
-
-    arangorestore --create-collection true --import-data true --input-directory "dump"
-
-This will drop potentially existing collections in the target database that are also present
-in the input directory.
-
-To include system collections too, use `--include-system-collections true`:
-
-    arangorestore --create-collection true --import-data true --include-system-collections true --input-directory "dump"
-
-To (re-)create all non-system collections without loading document data, use:
-
-    arangorestore --create-collection true --import-data false --input-directory "dump"
-
-This will also drop existing collections in the target database that are also present in the
-input directory.
-
-To just load document data into all non-system collections, use:
-
-    arangorestore --create-collection false --import-data true --input-directory "dump"
-
-To restrict reloading to just specific collections, there is the `--collection` option.
-It can be specified multiple times if required:
-
-    arangorestore --collection myusers --collection myvalues --input-directory "dump"
-
-Collections will be processed in alphabetical order by _arangorestore_, with all document
-collections being processed before all [edge collections](../../Appendix/Glossary.md#edge-collection).
-This also remains valid when multiple threads are in use (from v3.4.0 on).
-
-Note however that when restoring an edge collection, no internal checks are made to validate
-whether the documents that the edges connect actually exist. As a consequence, when restoring
-individual collections which are part of a graph, you are not required to restore in a specific order.
-
-{% hint 'warning' %}
-When restoring only a subset of the collections of your database, and graphs are in use, you will need
-to make sure you are restoring all the needed collections (the ones that are part of the graph), as
-otherwise you might end up with edges pointing to non-existing documents (see the query sketch
-further below).
-{% endhint %}
-
-To restrict reloading to specific views, there is the `--view` option.
-Should you specify the `--collection` parameter, views will not be restored _unless_ you explicitly
-specify them via the `--view` option.
-
-    arangorestore --collection myusers --view myview --input-directory "dump"
-
-In the case of an arangosearch view, you must make sure that the linked collections are either
-also restored or already present on the server.
-
-Encryption
-----------
-
-See [Arangodump](../Arangodump/Examples.md#encryption) for details.
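-
-As noted in the warning above, restoring only some of the collections that make up a graph can
-leave edges behind that point to non-existing vertices. A quick way to check for such dangling
-edges after a partial restore is a small AQL query run from _arangosh_. This is only a sketch:
-the edge collection name `myedges` is a placeholder for your own collection.
-
-```js
-// List edges whose _from or _to vertex does not exist (anymore).
-// "myedges" is a placeholder edge collection name.
-db._query(`
-  FOR e IN myedges
-    FILTER DOCUMENT(e._from) == null OR DOCUMENT(e._to) == null
-    RETURN e._key
-`).toArray();
-```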
- -Reloading Data into a different Collection ------------------------------------------- - -_arangorestore_ will restore document and edges data with the exact same *_key*, *_rev*, *_from* -and *_to* values as found in the input directory. - -With some creativity you can also use _arangodump_ and _arangorestore_ to transfer data from one -collection into another (either on the same server or not). For example, to copy data from -a collection *myvalues* in database *mydb* into a collection *mycopyvalues* in database *mycopy*, -you can start with the following command: - - arangodump --collection myvalues --server.database mydb --output-directory "dump" - -This will create two files, `myvalues.structure.json` and `myvalues.data.json`, in the output -directory. To load data from the datafile into an existing collection *mycopyvalues* in database -*mycopy*, rename the files to `mycopyvalues.structure.json` and `mycopyvalues.data.json`. - -After that, run the following command: - - arangorestore --collection mycopyvalues --server.database mycopy --input-directory "dump" - -Restoring in a Cluster ----------------------- - -From v2.1 on, the *arangorestore* tool supports sharding and can be -used to restore data into a Cluster. Simply point it to one of the -_Coordinators_ in your Cluster and it will work as usual but on sharded -collections in the Cluster. - -If *arangorestore* is asked to restore a collection, it will use the same -number of shards, replication factor and shard keys as when the collection -was dumped. The distribution of the shards to the servers will also be the -same as at the time of the dump, provided that the number of _DBServers_ in -the cluster dumped from is identical to the number of DBServers in the -to-be-restored-to cluster. - -To modify the number of _shards_ or the _replication factor_ for all or just -some collections, *arangorestore* provides the options `--number-of-shards` -and `--replication-factor` (starting from v3.3.22 and v3.4.2). These options -can be specified multiple times as well, in order to override the settings -for dedicated collections, e.g. - - arangorestore --number-of-shards 2 --number-of-shards mycollection=3 --number-of-shards test=4 - -The above will restore all collections except "mycollection" and "test" with -2 shards. "mycollection" will have 3 shards when restored, and "test" will -have 4. It is possible to omit the default value and only use -collection-specific overrides. In this case, the number of shards for any -collections not overridden will be determined by looking into the -"numberOfShards" values contained in the dump. - -The `--replication-factor` options works in the same way, e.g. - - arangorestore --replication-factor 2 --replication-factor mycollection=1 - -will set the replication factor to 2 for all collections but "mycollection", which will get a -replication factor of just 1. - -{% hint 'info' %} -The options `--number-of-shards` and `replication-factor`, as well as the deprecated -options `--default-number-of-shards` and `--default-replication-factor`, are -**not applicable to system collections**. They are managed by the server. -{% endhint %} - -If a collection was dumped from a single instance and is then restored into -a cluster, the sharding will be done by the `_key` attribute by default. One can -manually edit the structural description for the shard keys in the dump files if -required (`*.structure.json`). 
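-
-After the restore you can verify from _arangosh_ (connected to a _Coordinator_) how a collection
-ended up being sharded. This is only a sketch; the collection name `mycollection` is a placeholder:
-
-```js
-// Show the sharding-related properties of a restored collection.
-// "mycollection" is a placeholder collection name.
-var props = db.mycollection.properties();
-print(props.numberOfShards, props.replicationFactor, props.shardKeys);
-```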
- -If you restore a collection that was dumped from a cluster into a single -ArangoDB instance, the number of shards, replication factor and shard keys will silently -be ignored. - -### Factors affecting speed of arangorestore in a Cluster - -The following factors affect speed of _arangorestore_ in a Cluster: - -- **Replication Factor**: the higher the _replication factor_, the more - time the restore will take. To speed up the restore you can restore - using a _replication factor_ of 1 and then increase it again - after the restore. This will reduce the number of network hops needed - during the restore. -- **Restore Parallelization**: if the collections are not restored in - parallel, the restore speed is highly affected. A parallel restore can - be done from v3.4.0 by using the `--threads` option of _arangorestore_. - Before v3.4.0 it is possible to achieve parallelization by restoring - on multiple _Coordinators_ at the same time. Depending on your specific - case, parallelizing on multiple _Coordinators_ can still be useful even - when the `--threads` option is in use (from v.3.4.0). - -{% hint 'tip' %} -Please refer to the [Fast Cluster Restore](FastClusterRestore.md) page -for further operative details on how to take into account, when restoring -using _arangorestore_, the two factors described above. -{% endhint %} - -### Restoring collections with sharding prototypes - -*arangorestore* will yield an error when trying to restore a -collection whose shard distribution follows a collection which does -not exist in the cluster and which was not dumped along: - - arangorestore --collection clonedCollection --server.database mydb --input-directory "dump" - - ERROR got error from server: HTTP 500 (Internal Server Error): ArangoError 1486: must not have a distributeShardsLike attribute pointing to an unknown collection - Processed 0 collection(s), read 0 byte(s) from datafiles, sent 0 batch(es) - -The collection can be restored by overriding the error message as -follows: - - arangorestore --collection clonedCollection --server.database mydb --input-directory "dump" --ignore-distribute-shards-like-errors - -Restore into an authentication-enabled ArangoDB ------------------------------------------------ - -Of course you can restore data into a password-protected ArangoDB as well. -However this requires certain user rights for the user used in the restore process. -The rights are described in detail in the [Managing Users](../../Administration/ManagingUsers/README.md) chapter. -For restore this short overview is sufficient: - -- When importing into an existing database, the given user needs `Administrate` - access on this database. -- When creating a new database during restore, the given user needs `Administrate` - access on `_system`. The user will be promoted with `Administrate` access on the - newly created database. diff --git a/Documentation/Books/Manual/Programs/Arangorestore/FastClusterRestore.md b/Documentation/Books/Manual/Programs/Arangorestore/FastClusterRestore.md deleted file mode 100644 index 05e144fcc8f5..000000000000 --- a/Documentation/Books/Manual/Programs/Arangorestore/FastClusterRestore.md +++ /dev/null @@ -1,279 +0,0 @@ -Fast Cluster Restore -==================== - -The _Fast Cluster Restore_ procedure documented in this page is recommended -to speed-up the performance of [_arangorestore_](../Arangorestore/README.md) -in a Cluster environment. 
- -It is assumed that a Cluster environment is running and a _logical_ backup -with [_arangodump_](../Arangodump/README.md) has already been taken. - -{% hint 'info' %} -The procedure described in this page is particularly useful for ArangoDB -version 3.3, but can be used in 3.4 and later versions as well. Note that -from v3.4, _arangorestore_ includes the option `--threads` which can be a first -good step already in achieving restore parallelization and its speed benefit. -However, the procedure below allows for even further parallelization (making -use of different _Coordinators_), and the part regarding temporarily setting -_replication factor_ to 1 is still useful in 3.4 and later versions. -{% endhint %} - -The speed improvement obtained by the procedure below is achieved by: - -1. Restoring into a Cluster that has _replication factor_ 1, thus reducing - number of network hops needed during the restore operation (_replication factor_ - is reverted to initial value at the end of the procedure - steps #2, #3 and #6). -2. Restoring in parallel multiple collections on different _Coordinators_ - (steps #4 and #5). - -{% hint 'info' %} -Please refer to -[this](Examples.md#factors-affecting-speed-of-arangorestore-in-a-cluster) -section for further context on the factors affecting restore speed when restoring -using _arangorestore_ in a Cluster. -{% endhint %} - -Step 1: Copy the _dump_ directory to all _Coordinators_ -------------------------------------------------------- - -The first step is to copy the directory that contains the _dump_ to all machines -where _Coordinators_ are running. - -{% hint 'tip' %} -This step is not strictly required as the backup can be restored over the -network. However, if the restore is executed locally the restore speed is -significantly improved. -{% endhint %} - -Step 2: Restore collection structures -------------------------------------- - -The collection structures have to be restored from exactly one _Coordinator_ (any -_Coordinator_ can be used) with a command similar to the following one. Please add -any additional needed option for your specific use case, e.g. `--create-database` -if the database where you want to restore does not exist yet: - -``` -arangorestore - --server.endpoint - --server.database - --server.password - --import-data false - --input-directory -``` - -{% hint 'info' %} -If you are using v3.3.22 or higher, or v3.4.2 or higher, please also add in the -command above the option `--replication-factor 1`. -{% endhint %} - -The option `--import-data false` tells _arangorestore_ to restore only the -collection structure and no data. - -Step 3: Set _Replication Factor_ to 1 --------------------------------------- - -{% hint 'info' %} -This step is **not** needed if you are using v3.3.22 or higher or v3.4.2 or higher -and you have used in the previous step the option `--replication-factor 1`. -{% endhint %} - -To speed up restore, it is possible to set the _replication factor_ to 1 before -importing any data. 
Run the following command from exactly one _Coordinator_ (any -_Coordinator_ can be used): - -``` -echo 'db._collections().filter(function(c) { return c.name()[0] !== "_"; }) -.forEach(function(c) { print("collection:", c.name(), "replicationFactor:", -c.properties().replicationFactor); c.properties({ replicationFactor: 1 }); });' -| arangosh - --server.endpoint - --server.database - --server.username - --server.password -``` - -Step 4: Create parallel restore scripts ---------------------------------------- - -Now that the Cluster is prepared, the `parallelRestore` script will be used. - -Please create the below `parallelRestore` script in any of your _Coordinators_. - -When executed (see below for further details), this script will create other scripts -that can be then copied and executed on each _Coordinator_. - - -``` -#!/bin/sh -# -# Version: 0.3 -# -# Release Notes: -# - v0.3: fixed a bug that was happening when the collection name included an underscore -# - v0.2: compatibility with version 3.4: now each coordinator_.sh -# includes a single restore command (instead of one for each collection) -# which allows making using of the --threads option in v.3.4.0 and later -# - v0.1: initial version - -if test -z "$ARANGOSH" ; then - export ARANGOSH=arangosh -fi -cat > /tmp/parallelRestore$$.js <<'EOF' -var fs = require("fs"); -var print = require("internal").print; -var exit = require("internal").exit; -var arangorestore = "arangorestore"; -var env = require("internal").env; -if (env.hasOwnProperty("ARANGORESTORE")) { - arangorestore = env["ARANGORESTORE"]; -} - -// Check ARGUMENTS: dumpDir coordinator1 coordinator2 ... - -if (ARGUMENTS.length < 2) { - print("Need at least two arguments DUMPDIR and COORDINATOR_ENDPOINTS!"); - exit(1); -} - -var dumpDir = ARGUMENTS[0]; -var coordinators = ARGUMENTS[1].split(","); -var otherArgs = ARGUMENTS.slice(2); - -// Quickly check the dump dir: -var files = fs.list(dumpDir).filter(f => !fs.isDirectory(f)); -var found = files.indexOf("ENCRYPTION"); -if (found === -1) { - print("This directory does not have an ENCRYPTION entry."); - exit(2); -} -// Remove ENCRYPTION entry: -files = files.slice(0, found).concat(files.slice(found+1)); - -for (let i = 0; i < files.length; ++i) { - if (files[i].slice(-5) !== ".json") { - print("This directory has files which do not end in '.json'!"); - exit(3); - } -} -files = files.map(function(f) { - var fullName = fs.join(dumpDir, f); - var collName = ""; - if (f.slice(-10) === ".data.json") { - var pos; - if (f.slice(0, 1) === "_") { // system collection - pos = f.slice(1).indexOf("_") + 1; - collName = "_" + f.slice(1, pos); - } else { - pos = f.lastIndexOf("_") - collName = f.slice(0, pos); - } - } - return {name: fullName, collName, size: fs.size(fullName)}; -}); -files = files.sort(function(a, b) { return b.size - a.size; }); -var dataFiles = []; -for (let i = 0; i < files.length; ++i) { - if (files[i].name.slice(-10) === ".data.json") { - dataFiles.push(i); - } -} - -// Produce the scripts, one for each coordinator: -var scripts = []; -var collections = []; -for (let i = 0; i < coordinators.length; ++i) { - scripts.push([]); - collections.push([]); -} - -var cnum = 0; -var temp = ''; -var collections = []; -for (let i = 0; i < dataFiles.length; ++i) { - var f = files[dataFiles[i]]; - if (typeof collections[cnum] == 'undefined') { - collections[cnum] = (`--collection ${f.collName}`); - } else { - collections[cnum] += (` --collection ${f.collName}`); - } - cnum += 1; - if (cnum >= coordinators.length) { - cnum = 0; - 
} -} - -var cnum = 0; -for (let i = 0; i < coordinators.length; ++i) { - scripts[i].push(`${arangorestore} --input-directory ${dumpDir} --server.endpoint ${coordinators[i]} ` + collections[i] + ' ' + otherArgs.join(" ")); -} - -for (let i = 0; i < coordinators.length; ++i) { - let f = "coordinator_" + i + ".sh"; - print("Writing file", f, "..."); - fs.writeFileSync(f, scripts[i].join("\n")); -} -EOF - -${ARANGOSH} --javascript.execute /tmp/parallelRestore$$.js -- "$@" -rm /tmp/parallelRestore$$.js -``` - -To run this script, all _Coordinator_ endpoints of the Cluster have to be -provided. The script accepts all options of the tool _arangorestore_. - -The command below can for instance be used on a Cluster with three -_Coordinators_: - -``` -./parallelRestore - tcp://:, - tcp://:, - tcp://: - --server.username - --server.password - --server.database - --create-collection false -``` - -**Notes:** - - - The option `--create-collection false` is passed since the collection - structures were created already in the previous step. - - Starting from v3.4.0 the _arangorestore_ option *--threads N* can be - passed to the command above, where _N_ is an integer, to further parallelize - the restore (default is `--threads 2`). - -The above command will create three scripts, where three corresponds to -the amount of listed _Coordinators_. - -The resulting scripts are named `coordinator_.sh` (e.g. -`coordinator_0.sh`, `coordinator_1.sh`, `coordinator_2.sh`). - -Step 5: Execute parallel restore scripts ----------------------------------------- - -The `coordinator_.sh` scripts, that were created in the -previous step, now have to be executed on each machine where a _Coordinator_ -is running. This will start a parallel restore of the dump. - -Step 6: Revert to the initial _Replication Factor_ --------------------------------------------------- - -Once the _arangorestore_ process on every _Coordinator_ is completed, the -_replication factor_ has to be set to its initial value. - -Run the following command from exactly one _Coordinator_ (any _Coordinator_ can be -used). Please adjust the `replicationFactor` value to your specific case (2 in the -example below): - -``` -echo 'db._collections().filter(function(c) { return c.name()[0] !== "_"; }) -.forEach(function(c) { print("collection:", c.name(), "replicationFactor:", -c.properties().replicationFactor); c.properties({ replicationFactor: 2 }); });' -| arangosh - --server.endpoint - --server.database - --server.username - --server.password -``` diff --git a/Documentation/Books/Manual/Programs/Arangorestore/Options.md b/Documentation/Books/Manual/Programs/Arangorestore/Options.md deleted file mode 100644 index 43af633c6b86..000000000000 --- a/Documentation/Books/Manual/Programs/Arangorestore/Options.md +++ /dev/null @@ -1,6 +0,0 @@ -Arangorestore Options -===================== - -Usage: `arangorestore []` - -@startDocuBlock program_options_arangorestore diff --git a/Documentation/Books/Manual/Programs/Arangorestore/README.md b/Documentation/Books/Manual/Programs/Arangorestore/README.md deleted file mode 100644 index f55667cecb93..000000000000 --- a/Documentation/Books/Manual/Programs/Arangorestore/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Arangorestore -============= - -_Arangorestore_ is a command-line client tool to restore backups created by -[_Arangodump_](../Arangodump/README.md) to -[ArangoDB servers](../Arangod/README.md). - -If you want to import data in formats like JSON or CSV, see -[_Arangoimport_](../Arangoimport/README.md) instead. 
- -_Arangorestore_ can restore selected collections or all collections of a backup, -optionally including _system_ collections. One can restore the structure, i.e. -the collections with their configuration with or without data. -Views can also be dumped or restored (either all of them or selectively). - -{% hint 'tip' %} -In order to speed up the _arangorestore_ performance in a Cluster environment, -the [Fast Cluster Restore](FastClusterRestore.md) -procedure is recommended. -{% endhint %} diff --git a/Documentation/Books/Manual/Programs/Arangosh/Details.md b/Documentation/Books/Manual/Programs/Arangosh/Details.md deleted file mode 100644 index 37fe007878e9..000000000000 --- a/Documentation/Books/Manual/Programs/Arangosh/Details.md +++ /dev/null @@ -1,186 +0,0 @@ -Arangosh Details -================ - -Interaction ------------ - -You can paste multiple lines into Arangosh, given the first line ends with an -opening brace: - - @startDocuBlockInline shellPaste - @EXAMPLE_ARANGOSH_OUTPUT{shellPaste} - |for (var i = 0; i < 10; i ++) { - | require("@arangodb").print("Hello world " + i + "!\n"); - } - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock shellPaste - - -To load your own JavaScript code into the current JavaScript interpreter context, -use the load command: - - require("internal").load("/tmp/test.js") // <- Linux / macOS - require("internal").load("c:\\tmp\\test.js") // <- Windows - -Exiting arangosh can be done using the key combination ``` + D``` or by -typing ```quit``` - -Shell Output ------------- - -The ArangoDB shell will print the output of the last evaluated expression -by default: - - @startDocuBlockInline lastExpressionResult - @EXAMPLE_ARANGOSH_OUTPUT{lastExpressionResult} - 42 * 23 - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock lastExpressionResult - -In order to prevent printing the result of the last evaluated expression, -the expression result can be captured in a variable, e.g. - - @startDocuBlockInline lastExpressionResultCaptured - @EXAMPLE_ARANGOSH_OUTPUT{lastExpressionResultCaptured} - var calculationResult = 42 * 23 - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock lastExpressionResultCaptured - -There is also the `print` function to explicitly print out values in the -ArangoDB shell: - - @startDocuBlockInline printFunction - @EXAMPLE_ARANGOSH_OUTPUT{printFunction} - print({ a: "123", b: [1,2,3], c: "test" }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock printFunction - -By default, the ArangoDB shell uses a pretty printer when JSON documents are -printed. This ensures documents are printed in a human-readable way: - - @startDocuBlockInline usingToArray - @EXAMPLE_ARANGOSH_OUTPUT{usingToArray} - db._create("five") - for (i = 0; i < 5; i++) db.five.save({value:i}) - db.five.toArray() - ~db._drop("five"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock usingToArray - -While the pretty-printer produces nice looking results, it will need a lot of -screen space for each document. Sometimes a more dense output might be better. -In this case, the pretty printer can be turned off using the command -*stop_pretty_print()*. - -To turn on pretty printing again, use the *start_pretty_print()* command. - -Escaping --------- - -In AQL, escaping is done traditionally with the backslash character: `\`. -As seen above, this leads to double backslashes when specifying Windows paths. -Arangosh requires another level of escaping, also with the backslash character. 
-
-It adds up to four backslashes that need to be written in Arangosh for a single
-literal backslash (`c:\tmp\test.js`):
-
-    db._query('RETURN "c:\\\\tmp\\\\test.js"')
-
-You can use [bind variables](../../../AQL/Invocation/WithArangosh.html) to
-mitigate this:
-
-    var somepath = "c:\\tmp\\test.js"
-    db._query(aql`RETURN ${somepath}`)
-
-Database Wrappers
------------------
-
-_Arangosh_ provides the *db* object by default, and this object can
-be used for switching to a different database and managing collections inside the
-current database.
-
-For a list of available methods for the *db* object, type
-
-    @startDocuBlockInline shellHelp
-    @EXAMPLE_ARANGOSH_OUTPUT{shellHelp}
-    db._help();
-    @END_EXAMPLE_ARANGOSH_OUTPUT
-    @endDocuBlock shellHelp
-
-The [`db` object](../../Appendix/References/DBObject.md) is available in *arangosh*
-as well as in *arangod*, e.g. if you're using [Foxx](../../Foxx/README.md). While its
-interface is consistent between the *arangosh* and the *arangod* implementations,
-its underpinning is not. The *arangod* implementation consists of JavaScript wrappers
-around ArangoDB's native C++ code, whereas the *arangosh* implementation
-wraps HTTP accesses to ArangoDB's [RESTful API](../../../HTTP/index.html).
-
-So while this code may produce similar results when executed in *arangosh* and
-*arangod*, the CPU usage and time required will be very different, since the
-*arangosh* version will be doing around 100k HTTP requests, and the
-*arangod* version will directly write to the database:
-
-```js
-for (i = 0; i < 100000; i++) {
-  db.test.save({ name: { first: "Jan" }, count: i});
-}
-```
-
-Using `arangosh` via unix shebang mechanisms
---------------------------------------------
-On Unix-like operating systems you can start scripts by specifying the interpreter in the first line of the script.
-This is commonly called a `shebang` or `hash bang`. You can also do that with `arangosh`, i.e. create `~/test.js`:
-
-    #!/usr/bin/arangosh --javascript.execute
-    require("internal").print("hello world")
-    db._query("FOR x IN test RETURN x").toArray()
-
-Note that the first line has to end with a blank in order to make it work.
-Mark it executable for the OS:
-
-    #> chmod a+x ~/test.js
-
-and finally try it out:
-
-    #> ~/test.js
-
-
-Shell Configuration
--------------------
-
-_arangosh_ will look for a user-defined startup script named *.arangosh.rc* in the
-user's home directory on startup. The home directory will likely be `/home/<username>/`
-on Unix/Linux, and is determined on Windows by peeking into the environment variables
-`%HOMEDRIVE%` and `%HOMEPATH%`.
-
-If the file *.arangosh.rc* is present in the home directory, _arangosh_ will execute
-the contents of this file inside the global scope.
-
-You can use this to define your own extra variables and functions that you need often.
-For example, you could put the following into the *.arangosh.rc* file in your home
-directory:
-
-```js
-// "var" keyword avoided intentionally...
-// otherwise "timed" would not survive the scope of this script
-global.timed = function (cb) {
-  console.time("callback");
-  cb();
-  console.timeEnd("callback");
-};
-```
-
-This will make a function named *timed* available in _arangosh_ in the global scope.
-
-You can now start _arangosh_ and invoke the function like this:
-
-```js
-timed(function () {
-  for (var i = 0; i < 1000; ++i) {
-    db.test.save({ value: i });
-  }
-});
-```
-
-Please keep in mind that, if present, the *.arangosh.rc* file needs to contain valid
-JavaScript code.
If you want any variables in the global scope to survive you need to -omit the *var* keyword for them. Otherwise the variables will only be visible inside -the script itself, but not outside. diff --git a/Documentation/Books/Manual/Programs/Arangosh/Examples.md b/Documentation/Books/Manual/Programs/Arangosh/Examples.md deleted file mode 100644 index c4e46267bb50..000000000000 --- a/Documentation/Books/Manual/Programs/Arangosh/Examples.md +++ /dev/null @@ -1,70 +0,0 @@ -Arangosh Examples -================= - -Connecting to a server ----------------------- - -By default _arangosh_ will try to connect to an ArangoDB server running on -server *localhost* on port *8529*. It will use the username *root* and an -empty password by default. Additionally it will connect to the default database -(*_system*). All these defaults can be changed using the following -command-line options: - -- `--server.database `: name of the database to connect to -- `--server.endpoint `: endpoint to connect to -- `--server.username `: database username -- `--server.password `: password to use when connecting -- `--server.authentication `: whether or not to use authentication - -For example, to connect to an ArangoDB server on IP *192.168.173.13* on port -8530 with the user *foo* and using the database *test*, use: - - arangosh --server.endpoint tcp://192.168.173.13:8530 --server.username foo --server.database test --server.authentication true - -_arangosh_ will then display a password prompt and try to connect to the -server after the password was entered. - -The shell will print its own version number and if successfully connected -to a server the version number of the ArangoDB server. - -{% hint 'tip' %} -If the server endpoint is configured for SSL then clients such as _arangosh_ -need to connect to it using an SSL socket as well. For example, use `http+ssl://` -as schema in `--server.endpoint` for an SSL-secured HTTP connection. -{% endhint %} - -The schema of an endpoint is comprised of a protocol and a socket in the format -`protocol+socket://`. There are alternatives and shorthands for some combinations, -`ssl://` is equivalent to `http+ssl://` and `https://` for instance: - -Protocol | Socket | Schema --------------|------------------|----------- -HTTP | TCP | `http+tcp`, `http+srv`, `http`, `tcp` -HTTP | TCP with SSL/TLS | `http+ssl`, `https`, `ssl` -HTTP | Unix | `http+unix`, `unix` -VelocyStream | TCP | `vst+tcp`, `vst+srv`, `vst` -VelocyStream | TCP with SSL/TLS | `vst+ssl`, `vsts` -VelocyStream | Unix | `vst+unix` - -Using Arangosh --------------- - -To change the current database after the connection has been made, you -can use the `db._useDatabase()` command in Arangosh: - - @startDocuBlockInline shellUseDB - @EXAMPLE_ARANGOSH_OUTPUT{shellUseDB} - db._createDatabase("myapp"); - db._useDatabase("myapp"); - db._useDatabase("_system"); - db._dropDatabase("myapp"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock shellUseDB - -To get a list of available commands, Arangosh provides a *help()* function. -Calling it will display helpful information. - -_arangosh_ also provides auto-completion. Additional information on available -commands and methods is thus provided by typing the first few letters of a -variable and then pressing the tab key. It is recommend to try this with entering -*db.* (without pressing return) and then pressing tab. 
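-
-To get a feeling for the commands that are available on the *db* object, the following is a
-minimal sketch of an interactive session (the collection name `mycol` is made up for this example):
-
-```js
-// Minimal arangosh session sketch; "mycol" is a placeholder collection name.
-db._create("mycol");                                    // create a document collection
-db.mycol.save({ name: "test", value: 1 });              // insert a document
-db._query("FOR d IN mycol RETURN d.value").toArray();   // run an AQL query, returns [ 1 ]
-db._drop("mycol");                                      // clean up again
-```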
diff --git a/Documentation/Books/Manual/Programs/Arangosh/Options.md b/Documentation/Books/Manual/Programs/Arangosh/Options.md deleted file mode 100644 index bbbfd7a91d91..000000000000 --- a/Documentation/Books/Manual/Programs/Arangosh/Options.md +++ /dev/null @@ -1,6 +0,0 @@ -Arangosh Options -================ - -Usage: `arangosh []` - -@startDocuBlock program_options_arangosh diff --git a/Documentation/Books/Manual/Programs/Arangosh/README.md b/Documentation/Books/Manual/Programs/Arangosh/README.md deleted file mode 100644 index d5c44e1d386f..000000000000 --- a/Documentation/Books/Manual/Programs/Arangosh/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Arangosh -======== - -The ArangoDB shell (_arangosh_) is a command-line client tool that can be used -for administration of ArangoDB servers. - -It offers a V8 JavaScript shell environment, in which you can use JS interfaces -and modules like the [`db` object](../../Appendix/References/DBObject.md) to -manage collections or run ad-hoc queries for instance, access the -[General Graph module](../../Graphs/GeneralGraphs/README.md) or other features. - -It can be used as interactive shell (REPL) as well as to execute a JavaScript -string or file. It is not a general command line like PowerShell or Bash however. -Commands like `curl` or invocations of [ArangoDB programs and tools](../README.md) -are not possible inside of this JS shell! diff --git a/Documentation/Books/Manual/Programs/FoxxCLI/Details.md b/Documentation/Books/Manual/Programs/FoxxCLI/Details.md deleted file mode 100644 index 238e955916eb..000000000000 --- a/Documentation/Books/Manual/Programs/FoxxCLI/Details.md +++ /dev/null @@ -1,172 +0,0 @@ - -# Foxx CLI Details - -## Install - -**foxx-cli** runs on [Node.js](https://nodejs.org) and can be installed with -[yarn](https://yarnpkg.com): - -```sh -yarn global add foxx-cli -``` - -Or with [npm](https://www.npmjs.com): - -```sh -npm install --global foxx-cli -``` - -**Note**: using yarn you can also run **foxx-cli** from your project's -`devDependencies`: - -```sh -yarn add --dev foxx-cli -yarn foxx help -``` - -If you're using a recent version of npm you can also use npx: - -```sh -npx -p foxx-cli foxx help -``` - -## Usage - -After you've installed **foxx-cli**, you should be able to use the `foxx` -program. You can learn more about the different commands `foxx` supports by -using the `--help` flag. - -```sh -foxx --help -``` - -You can also use the `--help` flag with commands to learn more about them, e.g.: - -```sh -foxx install --help # Help for the "install" command - -foxx server --help # Help for the "server" command - -foxx server list --help # Subcommands are supported, too -``` - -If you have no prior knowledge of Foxx, you can get started by [installing ArangoDB locally](https://www.arangodb.com/download) and then creating a new Foxx service in the current directory using the `init` command: - -```sh -foxx init -i # answer the interactive questions -``` - -If you want an example, you can also let `init` create an example service for you: - -```sh -foxx init -e # create an example service please -``` - -You can also just use `foxx init` to create a minimal service without the example code. - -You can inspect the files created by the program and tweak them as necessary. 
Once you're ready, install the service at a _mount path_ using the `install` command: - -```sh -foxx install /hello-foxx # installs the current directory -``` - -You should then be able to view the installed service in your browser at the following URL: - - - -If you continue to work on your Foxx service and want to upgrade the installed version with your local changes use the `upgrade` command to do so. - -```sh -foxx upgrade /hello-foxx # upgrades the server with the current directory -``` - -## Special files - -### manifest.json - -The `manifest.json` or manifest file contains a service's meta-information. For -more information on the manifest format, see the -[official ArangoDB documentation](https://docs.arangodb.com/3/Manual/Foxx/Manifest.html). - -The directory containing a service's `manifest.json` file is called the _root -directory_ of the service. - -### foxxignore - -If you want to exclude files from the service bundle that will uploaded to -ArangoDB you can create a file called `.foxxignore` in the root directory of -your service. Each line should specify one pattern you wish to ignore: - -* Patterns starting with `!` will be treated as an explicit whitelist. Paths - matching these patterns will not be ignored even if they would match any of - the other patterns. - - **Example**: `!index.js` will override any pattern matching a file called - `index.js`. - -* Patterns starting with `/` will only match paths relative to the service's - root directory. - - **Example**: `/package.json` will not match `node_modules/joi/package.json`. - -* Patterns ending with `/` will match a directory and any files inside of it. - - **Example**: `node_modules/` will exclude all `node_modules` directories and - all of their contents. - -* A single `*` (glob) will match zero or more characters (even dots) in a file - or directory name. - - **Example**: `.*` will match any files and directories with a name starting - with a dot. - -* A double `**` (globstar) will match zero or more levels of nesting. - - **Example**: `hello/**/world` will match `hello/world`, `hello/foo/world`, - `hello/foo/bar/world`, and so on. - -* Patterns starting with `#` are considered comments and will be ignored. - -For more details on the pattern matching behaviour, see the documentation of the -[minimatch](https://www.npmjs.com/package/minimatch) module (with the `dot` flag -enabled). - -If no `.foxxignore` file is present in the service's root directory the -following patterns will be ignored automatically: `.git/`, `.svn/`, `.hg/`, -`*.swp`, `.DS_Store`. - -Should you need to include files that match these patterns for some reason, you -can override this list by creating an empty `.foxxignore` file. 
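-
-Putting the patterns described above together, a (hypothetical) `.foxxignore` could look like this:
-
-```
-# exclude editor swap files and anything else starting with a dot
-*.swp
-.*
-# exclude dependencies and version control data
-node_modules/
-.git/
-# but always keep the main entry point
-!index.js
-```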
- -You can also create a `.foxxignore` file in the current directory using the -`ignore` command: - -```sh -foxx ignore # creates a file pre-populated with the defaults - -foxx ignore --force # creates an empty file -``` - -To add individual patterns to the `.foxxignore` file just pass them as -additional arguments: - -```sh -foxx ignore .git/ .svn/ # you can pass multiple patterns at once - -foxx ignore '*.swp' # make sure to escape special characters -``` - -### foxxrc - -If you define servers using the `server` commands, a `.foxxrc` file will be -created in your `$HOME` directory, which is typically one of the following -paths: - -* `/home/$USER` on Linux - -* `/Users/$USER` on macOS - -* `C:\Users\$USER` on Windows - -This file contains sections for each server which may contain server credentials -should you decide to save them. diff --git a/Documentation/Books/Manual/Programs/FoxxCLI/README.md b/Documentation/Books/Manual/Programs/FoxxCLI/README.md deleted file mode 100644 index b5e378bc50cb..000000000000 --- a/Documentation/Books/Manual/Programs/FoxxCLI/README.md +++ /dev/null @@ -1,10 +0,0 @@ - -# Foxx CLI - -Foxx CLI is command line tool for managing and developing ArangoDB -Foxx services. It is an optional tool which requires Node.js and -can be installed via the package managers NPM and Yarn. - -It is the successor of `foxx-manager`, which is deprecated and will be -removed eventually. Also see [Foxx Deployment](../../Foxx/Deployment.md) -for additional deployment options. diff --git a/Documentation/Books/Manual/Programs/README.md b/Documentation/Books/Manual/Programs/README.md deleted file mode 100644 index a342f495cdd6..000000000000 --- a/Documentation/Books/Manual/Programs/README.md +++ /dev/null @@ -1,36 +0,0 @@ -Programs & Tools -================ - -The full ArangoDB package ships with the following programs and tools: - -| Binary name | Brief description | -|-----------------|-------------------| -| `arangod` | [ArangoDB server](Arangod/README.md). This server program is intended to run as a daemon process / service to serve the various client connections to the server via TCP / HTTP. It also provides a [web interface](WebInterface/README.md). -| `arangosh` | [ArangoDB shell](Arangosh/README.md). A client that implements a read-eval-print loop (REPL) and provides functions to access and administrate the ArangoDB server. -| `arangodb` | [ArangoDB Starter](Starter/README.md) for easy deployment of ArangoDB instances. -| `arangodump` | Tool to [create backups](Arangodump/README.md) of an ArangoDB database. -| `arangorestore` | Tool to [load backups](Arangorestore/README.md) back into an ArangoDB database. -| `arangoimport` | [Bulk importer](Arangoimport/README.md) for the ArangoDB server. It supports JSON and CSV. -| `arangoexport` | [Bulk exporter](Arangoexport/README.md) for the ArangoDB server. It supports JSON, CSV and XML. -| `arango-dfdb` | [Datafile debugger](Arango-dfdb/README.md) for ArangoDB (MMFiles storage engine only). -| `arangobench` | [Benchmark and test tool](Arangobench/README.md). It can be used for performance and server function testing. -| `arangoinspect` | [Inspection tool](Arangoinspect/README.md) that gathers server setup information. -| `arangovpack` | Utility to convert [VelocyPack](https://github.com/arangodb/velocypack) data to JSON. 
- -The client package comes with a subset of programs and tools: - -- arangosh -- arangoimport -- arangoexport -- arangodump -- arangorestore -- arangobench -- arangoinspect -- arangovpack - -Additional tools which are available separately: - -| Name | Brief description | -|-----------------|-------------------| -| [Foxx CLI](FoxxCLI/README.md) | Command line tool for managing and developing Foxx services -| [kube-arangodb](../Deployment/Kubernetes/README.md) | Operators to manage Kubernetes deployments diff --git a/Documentation/Books/Manual/Programs/Starter/Architecture.md b/Documentation/Books/Manual/Programs/Starter/Architecture.md deleted file mode 100644 index 33526cabf72b..000000000000 --- a/Documentation/Books/Manual/Programs/Starter/Architecture.md +++ /dev/null @@ -1,166 +0,0 @@ - -# ArangoDB Starter Architecture - -## What does the Starter do - -The ArangoDB Starter is a program used to create ArangoDB database deployments -on bare-metal (or virtual machines) with ease. -It enables you to create everything from a simple Single server instance -to a full blown Cluster with datacenter to datacenter replication in under 5 minutes. - -The Starter is intended to be used in environments where there is no higher -level orchestration system (e.g. Kubernetes or DC/OS) available. - -## Starter versions - -The Starter is a separate process in a binary called `arangodb` (or `arangodb.exe` on Windows). -This binary has its own version number that is independent of a ArangoDB (database) -version. - -This means that Starter version `a.b.c` can be used to run deployments -of ArangoDB databases with different version. -For example, the Starter with version `0.11.2` can be used to create -ArangoDB deployments with ArangoDB version `3.2.` as well -as deployments with ArangoDB version `3.3.`. - -It also means that you can update the Starter independently from the ArangoDB -database. - -Note that the Starter is also included in all binary ArangoDB packages. - -To find the versions of you Starters & ArangoDB database, run the following commands: - -```bash -# To get the Starter version -arangodb --version -# To get the ArangoDB database version -arangod --version -``` - -## Starter deployment modes - -The Starter supports 3 different modes of ArangoDB deployments: - -1. Single server -1. Active failover -1. Cluster - -Note: Datacenter replication is an option for the `cluster` deployment mode. - -You select one of these modes using the `--starter.mode` command line option. - -Depending on the mode you've selected, the Starter launches one or more -(`arangod` / `arangosync`) server processes. - -No matter which mode you select, the Starter always provides you -a common directory structure for storing the servers data, configuration & log files. - -## Starter operating modes - -The Starter can run as normal processes directly on the host operating system, -or as containers in a docker runtime. - -When running as normal process directly on the host operating system, -the Starter launches the servers as child processes and monitors those. -If one of the server processes terminates, a new one is started automatically. - -When running in a docker container, the Starter launches the servers -as separate docker containers, that share the volume namespace with -the container that runs the Starter. It monitors those containers -and if one terminates, a new container is launched automatically. 
- -## Starter data-directory - -The Starter uses a single directory with a well known structure to store -all data for its own configuration & logs, as well as the configuration, -data & logs of all servers it starts. - -This data directory is set using the `--starter.data-dir` command line option. -It contains the following files & sub-directories. - -- `setup.json` The configuration of the "cluster of Starters". - For details see below. DO NOT edit this file. -- `arangodb.log` The log file of the Starter -- `single`, `agent`, `coordinator`, `dbserver: directories for - launched servers. These directories contain among others the following files: - - `apps`: A directory with Foxx applications - - `data`: A directory with database data - - `arangod.conf`: The configuration file for the server. Editing this file is possible, but not recommended. - - `arangod.log`: The log file of the server - - `arangod_command.txt`: File containing the exact command line of the started server (for debugging purposes only) - -## Running on multiple machines - -For the `activefailover` & `cluster` mode, it is required to run multiple -Starters, as every Starter will only launch a subset of all servers needed -to form the entire deployment. -For example in `cluster` mode, a Starter will launch a single agent, a single dbserver -and a single coordinator. - -It is the responsibility of the user to run the Starter on multiple machines such -that enough servers are started to form the entire deployment. -The minimum number of Starters needed is 3. - -The Starters running on those machines need to know about each other's existence. -In order to do so, the Starters form a "cluster" of their own (not to be confused -with the ArangoDB database cluster). -This cluster of Starters is formed from the values given to the `--starter.join` -command line option. You should pass the addresses (`:`) of all Starters. - -For example a typical commandline for a cluster deployment looks like this: - -```bash -arangodb --starter.mode=cluster --starter.join=hostA:8528,hostB:8528,hostC:8528 -# this command is run on hostA, hostB and hostC. -``` - -The state of the cluster (of Starters) is stored in a configuration file called -`setup.json` in the data directory of every Starter and the ArangoDB -agency is used to elect a master among all Starters. - -The master Starter is responsible for maintaining the list of all Starters -involved in the cluster and their addresses. The slave Starters (all Starters -except the elected master) fetch this list from the master Starter on regular -basis and store it to its own `setup.json` config file. - -Note: The `setup.json` config file MUST NOT be edited manually. - -## Running on multiple machines (under the hood) - -As mentioned above, when the Starter is used to create an `activefailover` -or `cluster` deployment, it first creates a "cluster" of Starters. - -These are the steps taken by the Starters to bootstrap such a deployment -from scratch. - -1. All Starters are started (either manually or by some supervisor) -1. All Starters try to read their config from `setup.json`. - If that file exists and is valid, this bootstrap-from-scratch process - is aborted and all Starters go directly to the `running` phase described below. -1. All Starters create a unique ID -1. The list of `--starter.join` arguments is sorted -1. All Starters request the unique ID from the first server in the sorted `--starter.join` list, - and compares the result with its own unique ID. -1. 
The Starter that finds its own unique ID, is continuing as `bootstrap master` - the other Starters are continuing as `bootstrap slaves`. -1. The `bootstrap master` waits for at least 2 `bootstrap slaves` to join it. -1. The `bootstrap slaves` contact the `bootstrap master` to join its cluster of Starters. -1. Once the `bootstrap master` has received enough (at least 2) requests - to join its cluster of Starters, it continues with the `running` phase. -1. The `bootstrap slaves` keep asking the `bootstrap master` about its state. - As soon as they receive confirmation to do so, they also continue with the `running` phase. - -In the `running` phase all Starters launch the desired servers and keeps monitoring those -servers. Once a functional agency is detected, all Starters will try to be -`running master` by trying to write their ID in a well known location in the agency. -The first Starter to succeed in doing so wins this master election. - -The `running master` will keep writing its ID in the agency in order to remaining -the `running master`. Since this ID is written with a short time-to-live, -other Starters are able to detect when the current `running master` has been stopped -or is no longer responsible. In that case the remaining Starters will perform -another master election to decide who will be the next `running master`. - -API requests that involve the state of the cluster of Starters are always answered -by the current `running master`. All other Starters will refer the request to -the current `running master`. diff --git a/Documentation/Books/Manual/Programs/Starter/Options.md b/Documentation/Books/Manual/Programs/Starter/Options.md deleted file mode 100644 index acb1ee28545d..000000000000 --- a/Documentation/Books/Manual/Programs/Starter/Options.md +++ /dev/null @@ -1,387 +0,0 @@ - -# Option reference - -The ArangoDB Starter provides a lot of options to control various aspects -of the cluster or database you want to run. - -Below you'll find a list of all options and their semantics. - -## Common options - -- `--starter.data-dir=path` - -`path` is the directory in which all data is stored. (default "./") - -In the directory, there will be a single file `setup.json` used for -restarts and a directory for each instances that runs on this machine. -Different instances of `arangodb` must use different data directories. - -- `--starter.join=address` - -Join a cluster with master at address `address` (default ""). -Address can be an host address or name, followed with an optional port. - -E.g. these are valid arguments. - -```bash ---starter.join=localhost ---starter.join=localhost:5678 ---starter.join=192.168.23.1:8528 ---starter.join=192.168.23.1 -``` - -- `--starter.local` - -Start a local (test) cluster. Since all servers are running on a single machine -this is really not intended for production setups. - -- `--starter.mode=cluster|single|activefailover` - -Select what kind of database configuration you want. -This can be a `cluster` configuration (which is the default), -a `single` server configuration or a `activefailover` configuration with -2 single services configured to take over when needed. - -Note that when running a `single` server configuration you will lose all -high availability features that a cluster provides you. - -- `--cluster.agency-size=int` - -number of agents in agency (default 3). - -This number has to be positive and odd, and anything beyond 5 probably -does not make sense. The default 3 allows for the failure of one agent. 
- -- `--starter.address=addr` - -`addr` is the address under which this server is reachable from the -outside. - -Use this option only in the case that `--cluster.agency-size` is set to 1. -In a single agent setup, the sole starter has to start on its own with -no reliable way to learn its own address. Using this option the master will -know under which address it can be reached from the outside. If you specify -`localhost` here, then all instances must run on the local machine. - -- `--starter.host=addr` - -`addr` is the address to which this server binds. (default "0.0.0.0") - -Usually there is no need to specify this option. -Only when you want to bind the starter to specific network device, -would you set this. -Note that setting this option to `127.0.0.1` will make this starter -unreachable for other starters, which is only allowed for -`single` server deployments or when using `--starter.local`. - -- `--docker.image=image` - -`image` is the name of a Docker image to run instead of the normal -executable. For each started instance a Docker container is launched. -Usually one would use the Docker image `arangodb/arangodb`. - -- `--docker.container=containerName` - -`containerName` is the name of a Docker container that is used to run the -executable. If you do not provide this argument but run the starter inside -a docker container, the starter will auto-detect its container name. - -## Authentication options - -The arango starter by default creates a cluster that uses no authentication. - -To create a cluster that uses authentication, create a file containing a random JWT secret (single line) -and pass it through the `--auth.jwt-secret-path` option. - -For example: - -```bash -arangodb create jwt-secret --secret=jwtSecret -arangodb --auth.jwt-secret=./jwtSecret -``` - -All starters used in the cluster must have the same JWT secret. - -To use a JWT secret to access the database, use `arangodb auth header`. -See [Using authentication tokens](./Security.md#using-authentication-tokens) for details. - -## SSL options - -The arango starter by default creates a cluster that uses no unencrypted connections (no SSL). - -To create a cluster that uses encrypted connections, you can use an existing server key file (.pem format) -or let the starter create one for you. - -To use an existing server key file use the `--ssl.keyfile` option like this: - -```bash -arangodb --ssl.keyfile=myServer.pem -``` - -Use [`arangodb create tls keyfile`](./Security.md) to create a server key file. - -To let the starter created a self-signed server key file, use the `--ssl.auto-key` option like this: - -```bash -arangodb --ssl.auto-key -``` - -All starters used to make a cluster must be using SSL or not. -You cannot have one starter using SSL and another not using SSL. - -If you start a starter using SSL, it's own HTTP server (see API) will also -use SSL. - -Note that all starters can use different server key files. - -Additional SSL options: - -- `--ssl.cafile=path` - -Configure the servers to require a client certificate in their communication to the servers using the CA certificate in a file with given path. - -- `--ssl.auto-server-name=name` - -name of the server that will be used in the self-signed certificate created by the `--ssl.auto-key` option. - -- `--ssl.auto-organization=name` - -name of the server that will be used in the self-signed certificate created by the `--ssl.auto-key` option. 
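Putting the authentication and SSL options together, a deployment that uses a shared JWT secret and auto-generated certificates could be started like this sketch (hosts and file names are placeholders):

```bash
# Create the shared JWT secret once and copy it to every machine
arangodb create jwt-secret --secret=./jwtSecret

# On hostA, hostB and hostC: start with authentication and auto-generated TLS
arangodb --starter.mode=cluster \
  --starter.join=hostA:8528,hostB:8528,hostC:8528 \
  --auth.jwt-secret=./jwtSecret \
  --ssl.auto-key
```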
## Other database options

Options for `arangod` that are not supported by the starter can still be passed to
the database servers using a pass-through option.
Every option that starts with a pass-through prefix is passed through to the command line
of one or more server instances.

- `--all.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all servers started by this starter.
- `--coordinators.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all coordinators started by this starter.
- `--dbservers.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all DB servers started by this starter.
- `--agents.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all agents started by this starter.

Some options are essential to the function of the starter and therefore cannot be passed through like this.

Example:

To activate HTTP request logging at debug level for all coordinators, use a command like this:

```bash
arangodb --coordinators.log.level=requests=debug
```

## Datacenter to datacenter replication options

- `--sync.start-master=bool`

Should an ArangoSync master instance be started (only relevant when `--starter.sync` is enabled; defaults to `true`).

- `--sync.start-worker=bool`

Should an ArangoSync worker instance be started (only relevant when `--starter.sync` is enabled; defaults to `true`).

- `--sync.monitoring.token=<token>`

Bearer token used to access ArangoSync monitoring endpoints.

- `--sync.master.jwt-secret=<path>`

Path of a file containing the JWT secret used to access the Sync Master (from the Sync Worker).

- `--sync.mq.type=<type>`

Type of message queue used by the Sync Master (defaults to "direct").

- `--sync.server.keyfile=<path>`

TLS keyfile of the local Sync Master.

- `--sync.server.client-cafile=<path>`

CA certificate used for client certificate verification.

## Other `arangosync` options

Options for `arangosync` that are not supported by the starter can still be passed to
the syncmasters & syncworkers using a pass-through option.
Every option that starts with a pass-through prefix is passed through to the command line
of one or more `arangosync` instances.

- `--sync.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all `arangosync` instances started by this starter.
- `--syncmasters.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all syncmasters started by this starter.
- `--syncworkers.<section>.<key>=<value>` is passed as `--<section>.<key>=<value>` to all syncworkers started by this starter.

Some options are essential to the function of the starter and therefore cannot be passed through like this.

Example:

To set a custom token TTL for the direct message queue, use a command like this:

```bash
arangodb --syncmasters.mq.direct-token-ttl=12h ...
```

## Esoteric options

- `--version`

Show the version of the starter.

- `--starter.port=int`

Port for the `arangodb` master (default 8528). See below under "Technical
explanation as to what happens" for a description of how the ports of
the other servers are derived from this number.

This is the port used for communication of the `arangodb` instances
amongst each other.

- `--starter.disable-ipv6=bool`

If set to `true`, the starter will configure the `arangod` servers
to bind to address `0.0.0.0` (all IPv4 interfaces)
instead of binding to `[::]` (all IPv4 and all IPv6 interfaces).

This is useful when IPv6 has actively been disabled on your machine.

- `--server.arangod=path`

Path to the `arangod` executable (the default varies from platform to
platform; the executable is searched for in various places).

This option only has to be specified if the standard search fails.

- `--server.js-dir=path`

Path to the JS library directory (the default varies from platform to platform;
it is coupled to the search for the executable).

This option only has to be specified if the standard search fails.

- `--server.storage-engine=mmfiles|rocksdb`

Sets the storage engine used by the `arangod` servers.
The value `rocksdb` is only allowed on `arangod` version 3.2 and up.

On `arangod` version 3.3 and earlier, the default value is `mmfiles`.
On `arangod` version 3.4 and later, the default value is `rocksdb`.

- `--cluster.start-coordinator=bool`

This indicates whether or not a coordinator instance should be started
(default true).

- `--cluster.start-dbserver=bool`

This indicates whether or not a DB server instance should be started
(default true).

- `--server.rr=path`

Path to the `rr` executable to use if non-empty (default ""). Expert and
debugging use only.

- `--log.color=bool`

If set to `true`, console log output is colorized.
The default is `true` when a terminal is attached to stdin,
`false` otherwise or when running on Windows.

- `--log.console=bool`

If set to `true`, log output is written to the console (default `true`).

- `--log.file=bool`

If set to `true`, log output is written to a file (default `true`).
The log file, called `arangodb.log`, can be found in the directory
specified using `--log.dir` or, if that is not set, the directory
specified using `--starter.data-dir`.

- `--log.verbose=bool`

Show more information (default `false`).

- `--log.dir=path`

Sets a custom directory to which all log files will be written.
When using the Starter in Docker, make sure that this directory is
mounted as a volume for the Starter.

Note: When using a custom log directory, the database server log files will be
named `arangod-<role>-<port>.log`. The log for the starter itself is still called
`arangodb.log`.

- `--log.rotate-files-to-keep=int`

Sets the number of old log files to keep when rotating log files of server components (default 5).

- `--log.rotate-interval=duration`

Sets the interval between rotations of log files of server components (default `24h`).
Use a value of `0` to disable automatic log rotation.

Note: The starter will always perform log rotation when it receives a `HUP` signal.
- -- `--starter.unique-port-offsets=bool` - -If set to true, all port offsets (of slaves) will be made globally unique. -By default (value is false), port offsets will be unique per slave address. - -- `--docker.user=user` - -`user` is an expression to be used for `docker run` with the `--user` -option. One can give a user id or a user id and a group id, separated -by a colon. The purpose of this option is to limit the access rights -of the process in the Docker container. - -- `--docker.endpoint=endpoint` - -`endpoint` is the URL used to reach the docker host. This is needed to run -the executable in docker. The default value is "unix:///var/run/docker.sock". - -- `--docker.imagePullPolicy=Always|IfNotPresent|Never` - -`docker.imagePullPolicy` determines if the docker image is being pull from the docker hub. -If set to `Always`, the image is always pulled and an error causes the starter to fail. -If set to `IfNotPresent`, the image is not pull if it is always available locally. -If set to `Never`, the image is never pulled (when it is not available locally an error occurs). -The default value is `Always` is the `docker.image` has the `:latest` tag or `IfNotPresent` otherwise. - -- `--docker.net-mode=mode` - -If `docker.net-mode` is set, all docker container will be started -with the `--net=` option. - -- `--docker.privileged=bool` - -If `docker.privileged` is set, all docker containers will be started -with the `--privileged` option turned on. - -- `--docker.tty=bool` - -If `docker.tty` is set, all docker containers will be started with a TTY. -If the starter itself is running in a docker container without a TTY -this option is overwritten to `false`. - -- `--starter.debug-cluster=bool` - -IF `starter.debug-cluster` is set, the start will record the status codes it receives -upon "server ready" requests to the log. This option is mainly intended for internal testing. - -## Environment variables - -It is possibe to replace all commandline arguments for the starter with environment variables. -To do so, set an environment variable named `ARANGODB_` + ``, -where all dashes, underscores and dots are replased with underscores. - -E.g. - -```bash -ARANGODB_DOCKER_TTY=true arangodb -``` - -is equal to: - -```bash -arangodb --docker.tty=true -``` diff --git a/Documentation/Books/Manual/Programs/Starter/README.md b/Documentation/Books/Manual/Programs/Starter/README.md deleted file mode 100644 index 4d3116552031..000000000000 --- a/Documentation/Books/Manual/Programs/Starter/README.md +++ /dev/null @@ -1,8 +0,0 @@ - -# ArangoDB Starter - -This chapter documents the _ArangoDB Starter_. - -The _ArangoDB Starter_ is a tool that can help you deploy ArangoDB in an easy way (either in single-instance, active/passive or Cluster mode). - -For a Tutorial, please refer to [this](../../Tutorials/Starter/README.md) section. diff --git a/Documentation/Books/Manual/Programs/Starter/Security.md b/Documentation/Books/Manual/Programs/Starter/Security.md deleted file mode 100644 index 8fb3c6deae25..000000000000 --- a/Documentation/Books/Manual/Programs/Starter/Security.md +++ /dev/null @@ -1,133 +0,0 @@ - -# Security - -Securing an ArangoDB deployment involves encrypting its connections and -authenticated access control. - -The ArangoDB starter provides several command to create the certificates -and tokens needed to do so. - -## Creating certificates - -The starter provides commands to create all certificates needed for an ArangoDB -deployment with optional datacenter to datacenter replication. 
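As an overview, a typical keyfile-based setup could look like the sketch below; each command is described in detail in the following subsections, and the file names are examples only:

```bash
# 1. Create a CA for TLS once and keep it in a safe place
arangodb create tls ca --cert=my-tls-ca.crt --key=my-tls-ca.key

# 2. Create a server keyfile per machine, signed by that CA
arangodb create tls keyfile \
  --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \
  --host=<hostname or IP> \
  --keyfile=my-tls-cert.keyfile

# 3. Create a JWT secret shared by all Starters
arangodb create jwt-secret --secret=my-secret.jwt
```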
- -### TLS server certificates - -To create a certificate used for TLS servers in the **keyfile** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses). -Then run: - -```bash -arangodb create tls keyfile \ - --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \ - --host= \ - --keyfile=my-tls-cert.keyfile -``` - -Make sure to store the generated keyfile (`my-tls-cert.keyfile`) in a safe place. - -To create a certificate used for TLS servers in the **crt** & **key** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses). -Then run: - -```bash -arangodb create tls certificate \ - --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \ - --host= \ - --cert=my-tls-cert.crt \ - --key=my-tls-cert.key \ -``` - -Make sure to protect and store the generated files (`my-tls-cert.crt` & `my-tls-cert.key`) in a safe place. - -### Client authentication certificates - -To create a certificate used for client authentication in the **keyfile** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses) or email addresses. -Then run: - -```bash -arangodb create client-auth keyfile \ - --cacert=my-client-auth-ca.crt --cakey=my-client-auth-ca.key \ - [--host= | --email=] \ - --keyfile=my-client-auth-cert.keyfile -``` - -Make sure to protect and store the generated keyfile (`my-client-auth-cert.keyfile`) in a safe place. - -### CA certificates - -To create a CA certificate used to **sign TLS certificates**, run: - -```bash -arangodb create tls ca \ - --cert=my-tls-ca.crt --key=my-tls-ca.key -``` - -Make sure to protect and store both generated files (`my-tls-ca.crt` & `my-tls-ca.key`) in a safe place. - -Note: CA certificates have a much longer lifetime than normal certificates. -Therefore even more care is needed to store them safely. - -To create a CA certificate used to **sign client authentication certificates**, run: - -```bash -arangodb create client-auth ca \ - --cert=my-client-auth-ca.crt --key=my-client-auth-ca.key -``` - -Make sure to protect and store both generated files (`my-client-auth-ca.crt` & `my-client-auth-ca.key`) -in a safe place. - -Note: CA certificates have a much longer lifetime than normal certificates. -Therefore even more care is needed to store them safely. - -## Creating authentication tokens - -JWT tokens are used to authenticate servers (within a cluster) with each other. - -### JWT tokens - -To create a file containing an JWT token, run: - -```bash -arangodb create jwt-secret \ - --secret=my-secret.jwt [--length=32] -``` - -Make sure to protect and store the generated file (`my-secret.jwt`) in a safe place. - -## Using authentication tokens - -ArangoDB deployments that require authentication can be accessed through standard user+password -pairs or using a JWT to get "super-user" access. - -This super-user access is needed to communicate directly with the agency or with any server -in the deployment. -Note that uses super-user access for normal database access is NOT advised. 
- -To create a JWT from the JWT secret file specified using the `--auth.jwt-secret` option, -use the following command: - -```bash -arangodb auth token --auth.jwt-secret= -``` - -To create a complete HTTP Authorization header that can be passed directly to tools like `curl`, -use the following command: - -```bash -arangodb auth header --auth.jwt-secret= -``` - -Using `curl` with this command looks like this: - -```bash -curl -v -H "$(arangodb auth header --auth.jwt-secret=)" http://:8529/_api/version -``` - -Note the double quotes around `$(...)`. diff --git a/Documentation/Books/Manual/Programs/WebInterface/AqlEditor.md b/Documentation/Books/Manual/Programs/WebInterface/AqlEditor.md deleted file mode 100644 index 0ccc14dd3dcb..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/AqlEditor.md +++ /dev/null @@ -1,112 +0,0 @@ -Query View -========== - -The query view offers you three different subviews: - -- Editor -- Running Queries -- Slow Query History - -AQL Query Editor ----------------- - -The web interface offers a AQL Query Editor: - -![Editor Input](images/queryEditorInput.png) - -The editor is split into two parts, the query editor pane and the bind -parameter pane. - -The left pane is your regular query input field, where you can edit and then -execute or explain your queries. By default, the entered bind parameter will -automatically be recognized and shown in the bind parameter table in the right -pane, where you can easily edit them. - -The input fields are equipped with type detection. This means you don't have to -use quote marks around string, just write them as-is. Numbers will be treated -as numbers, *true* and *false* as booleans, *null* as null-type value. Square -brackets can be used to define arrays, and curly braces for objects (keys and -values have to be surrounded by double quotes). This will mostly be what you want. -But if you want to force something to be treated as string, use quotation marks -for the value: - -```js -123 // interpreted as number -"123" // interpreted as string - -["foo", "bar", 123, true] // interpreted as array -['foo', 'bar', 123, true] // interpreted as string -``` - -If you are used to work with JSON, you may want to switch the bind parameter -editor to JSON mode by clicking on the upper right toggle button. You can then -edit the bind parameters in raw JSON format. - -### Custom Queries - -To save the current query use the *Save* button in the top left corner of -the editor or use the shortcut (see below). - -![Custom Queries](images/queryCustoms.png) - -By pressing the *Queries* button in the top left corner of the editor you -activate the custom queries view. Here you can select a previously stored custom -query or one of our query examples. - -Click on a query title to get a code preview. In addition, there are action -buttons to: - -- Copy to editor -- Explain query -- Run query -- Delete query - -For the built-in example queries, there is only *Copy to editor* available. - -To export or import queries to and from JSON you can use the buttons on the -right-hand side. - -### Result - -![Editor Output](images/queryEditorOutput.png) - -Each query you execute or explain opens up a new result box, so you are able -to fire up multiple queries and view their results at the same time. Every query -result box gives you detailed query information and of course the query result -itself. The result boxes can be dismissed individually, or altogether using the -*Remove results* button. 
The toggle button in the top right corner of each box -switches back and forth between the *Result* and *AQL* query with bind parameters. - -### Spotlight - -![Spotlight](images/querySpotlight.png) - -The spotlight feature opens up a modal view. There you can find all AQL keywords, -AQL functions and collections (filtered by their type) to help you to be more -productive in writing your queries. Spotlight can be opened by the magic wand icon -in the toolbar or via shortcut (see below). - -### AQL Editor Shortcuts - -- Ctrl / Cmd + Return to execute a query -- Ctrl / Cmd + Shift + Return to explain a query -- Ctrl / Cmd + Shift + S to save the current query -- Ctrl / Cmd + Shift + C to toggle comments -- Ctrl + Space to open up the spotlight search -- Ctrl + Cmd + Z to undo last change -- Ctrl + Cmd + Shift + Z to redo last change - -Running Queries ---------------- - -![Running Queries](images/runningQueries.png) - -The *Running Queries* tab gives you a compact overview of all running queries. -By clicking the red minus button, you can abort the execution of a running query. - -Slow Query History ------------------- - -![Slow Queries](images/slowQueries.png) - -The *Slow Query History* tab gives you a compact overview of all past slow queries. diff --git a/Documentation/Books/Manual/Programs/WebInterface/Cluster.md b/Documentation/Books/Manual/Programs/WebInterface/Cluster.md deleted file mode 100644 index af1e0dc30368..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Cluster.md +++ /dev/null @@ -1,61 +0,0 @@ -Cluster -======= - -The cluster section displays statistics about the general cluster performance. - -![Cluster](images/clusterView.png) - -Statistics: - - - Available and missing coordinators - - Available and missing database servers - - Memory usage (percent) - - Current connections - - Data (bytes) - - HTTP (bytes) - - Average request time (seconds) - -Nodes ------ - -### Overview - -The overview shows available and missing coordinators and database servers. - -![Nodes](images/nodesView.png) - -Functions: - -- Coordinator Dashboard: Click on a Coordinator will open a statistics dashboard. - -Information (Coordinator / Database servers): - -- Name -- Endpoint -- Last Heartbeat -- Status -- Health - -### Shards - -The shard section displays all available sharded collections. - -![Shards](images/shardsView.png) - -Functions: - -- Move Shard Leader: Click on a leader database of a shard server will open a move shard dialog. Shards can be - transferred to all available databas servers, except the leading database - server or an available follower. -- Move Shard Follower: Click on a follower database of a shard will open a move shard dialog. Shards can be - transferred to all available databas servers, except the leading database - server or an available follower. -- Rebalance Shards: A new database server will not have any shards. With the - rebalance functionality the cluster will start to rebalance shards including - empty database servers. - -Information (collection): - -- Shard -- Leader (green state: sync is complete) -- Followers diff --git a/Documentation/Books/Manual/Programs/WebInterface/Collections.md b/Documentation/Books/Manual/Programs/WebInterface/Collections.md deleted file mode 100644 index ef82a0518a5d..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Collections.md +++ /dev/null @@ -1,69 +0,0 @@ -Collections -=========== - -The collections section displays all available collections. 
From here you can -create new collections and jump into a collection for details (click on a -collection tile). - -![Collections](images/collectionsView.png) - -Functions: - - - A: Toggle filter properties - - B: Search collection by name - - D: Create collection - - C: Filter properties - - H: Show collection details (click tile) - -Information: - - - E: Collection type - - F: Collection state(unloaded, loaded, ...) - - G: Collection name - -Collection ----------- - -![Collection](images/collectionView.png) - -There are four view categories: - -1. Content: - - Create a document - - Delete a document - - Filter documents - - Download documents - - Upload documents - -2. Indices: - - Create indices - - Delete indices - -3. Info: - - Detailed collection information and statistics - -3. Settings: - - Configure name, journal size, index buckets, wait for sync - - Delete collection - - Truncate collection - - Unload/Load collection - - Save modifed properties (name, journal size, index buckets, wait for sync) - -Additional information: - -Upload format: - -I. Line-wise -```js -{ "_key": "key1", ... } -{ "_key": "key2", ... } -``` - -II. JSON documents in a list -```js -[ - { "_key": "key1", ... }, - { "_key": "key2", ... } -] -``` - diff --git a/Documentation/Books/Manual/Programs/WebInterface/Dashboard.md b/Documentation/Books/Manual/Programs/WebInterface/Dashboard.md deleted file mode 100644 index de69acb95e78..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Dashboard.md +++ /dev/null @@ -1,32 +0,0 @@ -Dashboard -========= - -The *Dashboard* tab provides statistics which are polled regularly from the -ArangoDB server. - -![Nodes](images/dashboardView.png) - -Requests Statistics: - - - Requests per second - - Request types - - Number of client connections - - Transfer size - - Transfer size (distribution) - - Average request time - - Average request time (distribution) - -System Resources: - -- Number of threads -- Memory -- Virtual size -- Major page faults -- Used CPU time - -Replication: - -- Replication state -- Totals -- Ticks -- Progress diff --git a/Documentation/Books/Manual/Programs/WebInterface/Document.md b/Documentation/Books/Manual/Programs/WebInterface/Document.md deleted file mode 100644 index 763346705975..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Document.md +++ /dev/null @@ -1,18 +0,0 @@ -Document -======== - -The document section offers a editor which let you edit documents and edges of a collection. - -![Document](images/documentView.png) - -Functions: - - - Edit document - - Save document - - Delete docment - - Switch between Tree/Code - Mode - - Create a new document - -Information: - - - Displays: _id, _rev, _key properties diff --git a/Documentation/Books/Manual/Programs/WebInterface/Graphs.md b/Documentation/Books/Manual/Programs/WebInterface/Graphs.md deleted file mode 100644 index 63f8c902b744..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Graphs.md +++ /dev/null @@ -1,78 +0,0 @@ -Graphs -====== - -The *Graphs* tab provides a viewer facility for graph data stored in ArangoDB. -It allows browsing ArangoDB graphs stored in the *_graphs* system collection or -a graph consisting of an arbitrary vertex and [edge collection](../../Appendix/Glossary.md#edge-collection). - -![manage graphs](images/graphsView.png) - -Please note that the graph viewer requires canvas (optional: webgl) support -in your browser. Especially Internet Explorer browsers older than version 9 -are likely to not support this. 
- -Graph Viewer ------------- - -![display graphs](images/graphViewer.png) - -Top Toolbar Functions: - -- Load full graph (Also nodes without connections will be drawn. Useful during graph modeling setup) -- Take a graph screenshot -- Start full screen mode -- Open graph options menu - -Default Context Menu (mouse-click background): - -- Add a new node -- Close visible context menu(s) - -Node Context Menu (mouse-click node): - -- Delete node -- Edit node -- Expand node (Show all bound edges) -- Draw edge (Connect with another node) -- Set as startnode (The Graph will rerender starting the selected node and given options (graph options menu)) - -Edge Context Menu (mouse-click edge): - -- Edit edge -- Delete edge - -Edge Highlighting (right-mouse-click node): - -- Highlight all edges connected to the node (right-click at the background will remove highlighting) - -![graph context menu](images/graphViewerContextMenu.png) - -### Graph Viewer Options - -Graph Options Menu: - -- Startnode (string - valid node id or space seperated list of id's): Heart of your graph. Rendering and traversing will start from here. Empty value means: a random starting point will be used. -- Layout: Different graph layouting algoritms. No overlap (optimal: big graph), force layout (optimal: medium graph), fruchtermann (optimal: little to medium graph). -- Renderer: Canvas mode allows editing. WebGL currently offers only display mode (a lot faster with much nodes/edges). -- Search depth (number): Search depth which is starting from your start node. -- Limit (number): Limit nodes count. If empty or zero, no limit is set. - -Nodes Options Menu: - -- Label (string): Nodes will be labeled by this attribute. If node attribute is not found, no label will be displayed. -- Add Collection Name: This appends the collection name to the label, if it exists. -- Color By Collections: Should nodes be colorized by their collection? If enabled, node color and node color attribute will be ignored. -- Color: Default node color. -- Color Attribute (string): If an attribute is given, nodes will then be colorized by the attribute. This setting ignores default node color if set. -- Size By Connections: Should nodes be sized by their edges count? If enabled, node sizing attribute will be ignored. -- Sizing Attribute (number): Default node size. Numeric value > 0. - -Edges Options Menu: - -- Label (string): Edges will be labeled by this attribute. If edge attribute is not found, no label will be displayed. -- Add Collection Name: This appends the collection name to the label, if it exists. -- Color By Collections: Should edges be colorized by their collection? If enabled, edge color and edge color attribute will be ignored. -- Color: Default edge color. -- Color Attribute (string): If an attribute is given, edges will then be colorized by the attribute. This setting ignores default node color if set. -- Type: The renderer offers multiple types of rendering. They only differ in their display style, except for the type 'curved'. The curved type -allows to display more than one edges between two nodes. diff --git a/Documentation/Books/Manual/Programs/WebInterface/Logs.md b/Documentation/Books/Manual/Programs/WebInterface/Logs.md deleted file mode 100644 index a6e70ae6d079..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Logs.md +++ /dev/null @@ -1,17 +0,0 @@ -Logs -==== - -The logs section displays all available log entries. Log entries are filterable by -their log level types. 
- -![Logs](images/logsView.png) - -Functions: - - - Filter log entries by log level (all, info, error, warning, debug) - -Information: - - - Loglevel - - Date - - Message diff --git a/Documentation/Books/Manual/Programs/WebInterface/README.md b/Documentation/Books/Manual/Programs/WebInterface/README.md deleted file mode 100644 index b34518bc2b3a..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Web Interface -============= - -The ArangoDB server (*arangod*) comes with a built-in web interface for -administration. It lets you manage databases, collections, documents, -users, graphs and more. You can also run and explain queries in a -convenient way. Statistics and server status are provided as well. - -The Web Interface (also Web UI, frontend or *Aardvark*) can be accessed with a -browser under the URL `http://localhost:8529` with default server settings. - -The interface differs for standalone instances and cluster setups. - -Standalone: - -![Standalone Frontend](images/overview.png) - -Cluster: - -![Cluster Frontend](images/clusterView.png) diff --git a/Documentation/Books/Manual/Programs/WebInterface/Services.md b/Documentation/Books/Manual/Programs/WebInterface/Services.md deleted file mode 100644 index 706d37546bfa..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Services.md +++ /dev/null @@ -1,49 +0,0 @@ -Services -======== - -The services section displays all installed Foxx applications. You can create new services -or go into a detailed view of a chosen service. - -![Services](images/servicesView.png) - -Create Service --------------- - -There are four different possibilities to create a new service: - -1. Create service via zip file -2. Create service via github repository -3. Create service via official ArangoDB store -4. Create a blank service from scratch - -![Create Service](images/installService.png) - -Service View ------------- - -This section offers several information about a specific service. - -![Create Service](images/serviceView.png) - -There are four view categories: - -1. Info: - - Displays name, short description, license, version, mode (production, development) - - Offers a button to go to the services interface (if available) - -2. Api: - - Display API as SwaggerUI - - Display API as RAW JSON - -3. Readme: - - Displays the services manual (if available) - -4. Settings: - - Download service as zip file - - Run service tests (if available) - - Run service scripts (if available) - - Configure dependencies (if available) - - Change service parameters (if available) - - Change mode (production, development) - - Replace the service - - Delete the service diff --git a/Documentation/Books/Manual/Programs/WebInterface/Users.md b/Documentation/Books/Manual/Programs/WebInterface/Users.md deleted file mode 100644 index dc5dbed41d4f..000000000000 --- a/Documentation/Books/Manual/Programs/WebInterface/Users.md +++ /dev/null @@ -1,39 +0,0 @@ -Managing Users in the Web Interface -=================================== - -ArangoDB users are globally stored in the \_system database and can only be -mananged while logged on to this database. There you can find the *Users* section: - -![Users](images/users.png) - -General -------- - -Select a user to bring up the *General* tab with the username, name and active -status, as well as options to delete the user or change the password. - -![User General](images/userGeneral.png) - -Permissions ------------ - -Select a user and go to the *Permissions* tab. 
You will see a list of databases -and their corresponding database access level for that user. - -![User Permissions](images/userPermissions.png) - -Please note that server access level follows from the access level on -the database *\_system*. Furthermore, the default database access level -for this user appear in the artificial row with the database name `*`. - -Below this table is another one for the collection category access -levels. At first, it shows the list of databases, too. If you click on a -database, the list of collections in that database will be open and you -can see the defined collection access levels for each collection of that -database (which can be all unselected which means that nothing is -explicitly set). The default access levels for this user and database -appear in the artificial row with the collection name `*`. - -{% hint 'info' %} -Also see [**Managing Users**](../../Administration/ManagingUsers/README.md) about access levels. -{% endhint %} diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/clusterView.png b/Documentation/Books/Manual/Programs/WebInterface/images/clusterView.png deleted file mode 100644 index 231c05c97e81..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/clusterView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/collectionView.png b/Documentation/Books/Manual/Programs/WebInterface/images/collectionView.png deleted file mode 100644 index f0c78c10aef3..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/collectionView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/collectionsView.png b/Documentation/Books/Manual/Programs/WebInterface/images/collectionsView.png deleted file mode 100644 index 35cdb059bc02..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/collectionsView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/dashboardView.png b/Documentation/Books/Manual/Programs/WebInterface/images/dashboardView.png deleted file mode 100644 index dd889369f9f2..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/dashboardView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/documentView.png b/Documentation/Books/Manual/Programs/WebInterface/images/documentView.png deleted file mode 100644 index b2a92b35a7b2..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/documentView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/graphViewer.png b/Documentation/Books/Manual/Programs/WebInterface/images/graphViewer.png deleted file mode 100644 index 990a098c302f..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/graphViewer.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/graphViewerContextMenu.png b/Documentation/Books/Manual/Programs/WebInterface/images/graphViewerContextMenu.png deleted file mode 100644 index 6d07270315b2..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/graphViewerContextMenu.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/graphsView.png b/Documentation/Books/Manual/Programs/WebInterface/images/graphsView.png deleted file mode 100644 index fc1aeceb574f..000000000000 Binary files 
a/Documentation/Books/Manual/Programs/WebInterface/images/graphsView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/installService.png b/Documentation/Books/Manual/Programs/WebInterface/images/installService.png deleted file mode 100644 index 52033bf4716e..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/installService.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/loginView.png b/Documentation/Books/Manual/Programs/WebInterface/images/loginView.png deleted file mode 100644 index b8fe8e4384c3..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/loginView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/logsView.png b/Documentation/Books/Manual/Programs/WebInterface/images/logsView.png deleted file mode 100644 index 9c1841f0b50f..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/logsView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/nodesView.png b/Documentation/Books/Manual/Programs/WebInterface/images/nodesView.png deleted file mode 100644 index 24c884f0c0dc..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/nodesView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/overview.png b/Documentation/Books/Manual/Programs/WebInterface/images/overview.png deleted file mode 100644 index 3c06314d631a..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/overview.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/queryCustoms.png b/Documentation/Books/Manual/Programs/WebInterface/images/queryCustoms.png deleted file mode 100644 index bbf68b5f7119..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/queryCustoms.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorInput.png b/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorInput.png deleted file mode 100644 index f7de50f59120..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorInput.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorOutput.png b/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorOutput.png deleted file mode 100644 index 3e8aebf15f80..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/queryEditorOutput.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/querySpotlight.png b/Documentation/Books/Manual/Programs/WebInterface/images/querySpotlight.png deleted file mode 100644 index 2f80e989d47f..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/querySpotlight.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/runningQueries.png b/Documentation/Books/Manual/Programs/WebInterface/images/runningQueries.png deleted file mode 100644 index bcd93d50c575..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/runningQueries.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/selectDBView.png b/Documentation/Books/Manual/Programs/WebInterface/images/selectDBView.png deleted file 
mode 100644 index f470b4c06bcb..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/selectDBView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/serviceView.png b/Documentation/Books/Manual/Programs/WebInterface/images/serviceView.png deleted file mode 100644 index 2d3918bc592a..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/serviceView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/servicesView.png b/Documentation/Books/Manual/Programs/WebInterface/images/servicesView.png deleted file mode 100644 index d5c05ceb3416..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/servicesView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/shardsView.png b/Documentation/Books/Manual/Programs/WebInterface/images/shardsView.png deleted file mode 100644 index 79b69d6cd531..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/shardsView.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/slowQueries.png b/Documentation/Books/Manual/Programs/WebInterface/images/slowQueries.png deleted file mode 100644 index 5b7169fc852e..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/slowQueries.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/userGeneral.png b/Documentation/Books/Manual/Programs/WebInterface/images/userGeneral.png deleted file mode 100644 index e2dd80936720..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/userGeneral.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/userPermissions.png b/Documentation/Books/Manual/Programs/WebInterface/images/userPermissions.png deleted file mode 100644 index ccb3f8314f9a..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/userPermissions.png and /dev/null differ diff --git a/Documentation/Books/Manual/Programs/WebInterface/images/users.png b/Documentation/Books/Manual/Programs/WebInterface/images/users.png deleted file mode 100644 index a50c629b44c4..000000000000 Binary files a/Documentation/Books/Manual/Programs/WebInterface/images/users.png and /dev/null differ diff --git a/Documentation/Books/Manual/README.md b/Documentation/Books/Manual/README.md deleted file mode 100644 index 7c8270c478c9..000000000000 --- a/Documentation/Books/Manual/README.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -page-toc: - disable: true ---- -ArangoDB VERSION_NUMBER Documentation -===================================== - -Welcome to the ArangoDB documentation! - -{% hint 'info' %} -New and eager to try out ArangoDB? Start right away with our beginner's guide: -[**Getting Started**](GettingStarted/README.md) -{% endhint %} - -Structure ---------- - -The documentation is organized in five handbooks: - -- This manual describes ArangoDB and its features in detail for you as a user, - developer and administrator. -- The [AQL handbook](../AQL/index.html) explains ArangoDB's query language AQL. -- The [HTTP handbook](../HTTP/index.html) describes the internal API of ArangoDB - that is used to communicate with clients. In general, the HTTP handbook will be - of interest to driver developers. If you use any of the existing drivers for - the language of your choice, you can skip this handbook. 
-- Our [Cookbook](../Cookbook/index.html) with recipes for specific problems and - solutions. -- The [Driver handbook](../Drivers/index.html) includes the documentation of the - available official ArangoDB drivers and integrations, and an overview of the community - drivers. - -Features are illustrated with interactive usage examples; you can cut'n'paste them -into [arangosh](Programs/Arangosh/README.md) to try them out. The HTTP -[REST-API](../HTTP/index.html) for driver developers is demonstrated with cut'n'paste -recipes intended to be used with the [cURL](http://curl.haxx.se). Drivers may provide -their own examples based on these .js based examples to improve understandability -for their respective users, i.e. for the [java driver](https://github.com/arangodb/arangodb-java-driver#learn-more) -some of the samples are re-implemented. - -Key Features ------------- - -ArangoDB is a native multi-model, open-source database with flexible data models for documents, graphs, and key-values. Build high performance applications using a convenient SQL-like query language or JavaScript extensions. Use ACID transactions if you require them. Scale horizontally and vertically with a few mouse clicks. - -Key features include: - -* installing ArangoDB on a [**cluster**](Deployment/README.md) is as easy as installing an app on your mobile -* [**Flexible data modeling**](DataModeling/README.md): model your data as combination of key-value pairs, documents or graphs - perfect for social relations -* [**Powerful query language**](../AQL/index.html) (AQL) to retrieve and modify data -* Use ArangoDB as an [**application server**](Foxx/README.md) and fuse your application and database together for maximal throughput -* [**Transactions**](Transactions/README.md): run queries on multiple documents or collections with optional transactional consistency and isolation -* [**Replication** and **Sharding**](Administration/README.md): set up the database in a master-slave configuration or spread bigger datasets across multiple servers -* Configurable **durability**: let the application decide if it needs more durability or more performance -* No-nonsense storage: ArangoDB uses all of the power of **modern storage hardware**, like SSD and large caches -* JavaScript for all: **no language zoo**, you can use one language from your browser to your back-end -* ArangoDB can be easily deployed as a [**fault-tolerant distributed state machine**](Deployment/StandaloneAgency/README.md), which can serve as the animal brain of distributed appliances -* It is **open source** (Apache License 2.0) - -Community ---------- - -If you have questions regarding ArangoDB, Foxx, drivers, or this documentation don't hesitate to contact us on: - -- [GitHub](https://github.com/arangodb/arangodb/issues) for issues - and misbehavior or [pull requests](https://www.arangodb.com/community/) -- [Google Groups](https://groups.google.com/forum/?hl=de#!forum/arangodb) for discussions about ArangoDB in general or to announce your new Foxx App -- [StackOverflow](http://stackoverflow.com/questions/tagged/arangodb) for questions about AQL, usage scenarios etc. -- [Slack](http://slack.arangodb.com), our community chat - -When reporting issues, please describe: - -- the environment you run ArangoDB in -- the ArangoDB version you use -- whether you're using Foxx -- the client you're using -- which parts of the documentation you're working with (link) -- what you expect to happen -- what is actually happening - -We will respond as soon as possible. 
diff --git a/Documentation/Books/Manual/ReleaseNotes/21.md b/Documentation/Books/Manual/ReleaseNotes/21.md deleted file mode 100644 index 44382b5dcaa5..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/21.md +++ /dev/null @@ -1,4 +0,0 @@ -Version 2.1 -=========== - -- [What's New in 2.1](NewFeatures21.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/22.md b/Documentation/Books/Manual/ReleaseNotes/22.md deleted file mode 100644 index e1084506be8f..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/22.md +++ /dev/null @@ -1,4 +0,0 @@ -Version 2.2 -=========== - -- [What's New in 2.2](NewFeatures22.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/23.md b/Documentation/Books/Manual/ReleaseNotes/23.md deleted file mode 100644 index 09b8b5f227ea..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/23.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.3 -=========== - -- [What's New in 2.3](NewFeatures23.md) -- [Incompatible changes in 2.3](UpgradingChanges23.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/24.md b/Documentation/Books/Manual/ReleaseNotes/24.md deleted file mode 100644 index 615d9742c217..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/24.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.4 -=========== - -- [What's New in 2.4](NewFeatures24.md) -- [Incompatible changes in 2.4](UpgradingChanges24.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/25.md b/Documentation/Books/Manual/ReleaseNotes/25.md deleted file mode 100644 index 4f8ff8c8194b..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/25.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.5 -=========== - -- [What's New in 2.5](NewFeatures25.md) -- [Incompatible changes in 2.5](UpgradingChanges25.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/26.md b/Documentation/Books/Manual/ReleaseNotes/26.md deleted file mode 100644 index 6708196a2d86..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/26.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.6 -=========== - -- [What's New in 2.6](NewFeatures26.md) -- [Incompatible changes in 2.6](UpgradingChanges26.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/27.md b/Documentation/Books/Manual/ReleaseNotes/27.md deleted file mode 100644 index 0e2c11cdd2cb..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/27.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.7 -=========== - -- [What's New in 2.7](NewFeatures27.md) -- [Incompatible changes in 2.7](UpgradingChanges27.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/28.md b/Documentation/Books/Manual/ReleaseNotes/28.md deleted file mode 100644 index 043ccd348a8b..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/28.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 2.8 -=========== - -- [What's New in 2.8](NewFeatures28.md) -- [Incompatible changes in 2.8](UpgradingChanges28.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/30.md b/Documentation/Books/Manual/ReleaseNotes/30.md deleted file mode 100644 index 72b67354d1a3..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/30.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 3.0 -=========== - -- [What's New in 3.0](NewFeatures30.md) -- [Incompatible changes in 3.0](UpgradingChanges30.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/31.md b/Documentation/Books/Manual/ReleaseNotes/31.md deleted file mode 100644 index 2772be6fbb4b..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/31.md +++ /dev/null @@ -1,5 +0,0 @@ -Version 3.1 -=========== - -- [What's New in 3.1](NewFeatures31.md) -- 
[Incompatible changes in 3.1](UpgradingChanges31.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/32.md b/Documentation/Books/Manual/ReleaseNotes/32.md deleted file mode 100644 index 5043b8b05300..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/32.md +++ /dev/null @@ -1,6 +0,0 @@ -Version 3.2 -=========== - -- [What's New in 3.2](NewFeatures32.md) -- [Known Issues in 3.2](KnownIssues32.md) -- [Incompatible changes in 3.2](UpgradingChanges32.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/33.md b/Documentation/Books/Manual/ReleaseNotes/33.md deleted file mode 100644 index f1e9bdd158db..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/33.md +++ /dev/null @@ -1,6 +0,0 @@ -Version 3.3 -=========== - -- [What's New in 3.3](NewFeatures33.md) -- [Known Issues in 3.3](KnownIssues33.md) -- [Incompatible changes in 3.3](UpgradingChanges33.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/34.md b/Documentation/Books/Manual/ReleaseNotes/34.md deleted file mode 100644 index 7ff17795ea86..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/34.md +++ /dev/null @@ -1,6 +0,0 @@ -Version 3.4 -=========== - -- [What's New in 3.4](NewFeatures34.md) -- [Known Issues in 3.4](KnownIssues34.md) -- [Incompatible changes in 3.4](UpgradingChanges34.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/35.md b/Documentation/Books/Manual/ReleaseNotes/35.md deleted file mode 100644 index 645a0edb5aef..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/35.md +++ /dev/null @@ -1,6 +0,0 @@ -Version 3.5 -=========== - -- [What's New in 3.5](NewFeatures35.md) -- [Known Issues in 3.5](KnownIssues35.md) -- [Incompatible changes in 3.5](UpgradingChanges35.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md b/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md deleted file mode 100644 index 927f1ba43363..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md +++ /dev/null @@ -1,107 +0,0 @@ -Known Issues in ArangoDB 3.2 -============================ - -The following known issues are present in this version of ArangoDB and will be fixed -in follow-up releases: - -RocksDB storage engine ----------------------- - -The RocksDB storage engine is intentionally missing the following features that -are present in the MMFiles engine: - -* the datafile debugger (arango-dfdb) cannot be used with this storage engine - - RocksDB has its own crash recovery so using the dfdb will not make any sense here. - -* APIs that return collection properties or figures will return slightly different - attributes for the RocksDB engine than for the MMFiles engine. For example, the - attributes `journalSize`, `doCompact`, `indexBuckets` and `isVolatile` are present - in the MMFiles engine but not in the RocksDB engine. The memory usage figures reported - for collections in the RocksDB engine are estimate values, whereas they are - exact for the MMFiles engine. - -* the RocksDB engine does not support some operations which only make sense in the - context of the MMFiles engine. These are: - - - the `rotate` method on collections - - the `flush` method for WAL files - -* the RocksDB storage engine does not support volatile collections - -* transactions are limited in size. Transactions that get too big (in terms of - number of operations involved or the total size of data modified by the transaction) - will be committed automatically. 
Effectively this means that big user transactions - are split into multiple smaller RocksDB transactions that are committed individually. - The entire user transaction will not necessarily have ACID properties in this case. - - The threshold values for transaction sizes can be configured globally using the - startup options - - * `--rocksdb.intermediate-commit-size`: if the size of all operations in a transaction - reaches this threshold, the transaction is committed automatically and a new transaction - is started. The value is specified in bytes. - - * `--rocksdb.intermediate-commit-count`: if the number of operations in a transaction - reaches this value, the transaction is committed automatically and a new transaction - is started. - - * `--rocksdb.max-transaction-size`: this is an upper limit for the total number of bytes - of all operations in a transaction. If the operations in a transaction consume more - than this threshold value, the transaction will automatically abort with error 32 - ("resource limit exceeded"). - - It is also possible to override these thresholds per transaction. - -The following known issues will be resolved in future releases: - -* the RocksDB engine is not yet performance-optimized and potentially not well configured - -* collections for which a geo index is present will use collection-level write locks - even with the RocksDB engine. Reads from these collections can still be done in parallel - but no writes - -* modifying documents in a collection with a geo index will cause multiple additional - writes to RocksDB for maintaining the index structures - -* the number of documents reported for collections (`db..count()`) may be - slightly wrong during transactions if there are parallel transactions ongoing for the - same collection that also modify the number of documents - -* the `any` operation to provide a random document from a collection is supported - by the RocksDB engine but the operation has much higher algorithmic complexity than - in the MMFiles engine. It is therefore discouraged to call it for cases other than manual - inspection of a few documents in a collection - -* AQL queries in the cluster still issue an extra locking HTTP request per shard though - this would not be necessary for the RocksDB engine in most cases - -Installer ---------- - -* Upgrading from 3.1 to 3.2 on Windows requires the user to manually copy the database directory - to the new location and run an upgrade on the database. Please consult the - [Documentation](../Installation/Windows.md) - for detailed instructions. - -System Integration ------------------- - -* On some Linux systems systemd and system v might report that the arangodb - service is in good condition when it could not be started. In this case the - user needs to check `/var/log/arangodb3` for further information about the - failed startup. - -macOS ------ - - * Storage engine is not changeable on an existing database. Currently only the - initial selection of the storage engine is supported. - In order to use another storage engine, you have to delete your ArangoDB - application (macOS Application Folder) -   and `/Users//Library/ArangoDB` folder. - -OpenSSL 1.1 ------------ - - * ArangoDB v3.2 has been tested with OpenSSL 1.0 only and won't build against 1.1 when compiling on your own. 
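The section above notes that the intermediate-commit thresholds can also be overridden per transaction. As a minimal sketch only: the attribute names below (`intermediateCommitSize`, `intermediateCommitCount`, `maxTransactionSize`) mirror the startup options listed above and are assumed here, as is the `docs` collection.

```js
// Sketch: override the global RocksDB transaction thresholds for a single
// JavaScript transaction. Option names mirror the startup options above
// and are assumptions; "docs" is a hypothetical collection.
db._executeTransaction({
  collections: { write: [ "docs" ] },
  intermediateCommitSize: 128 * 1024 * 1024,  // auto-commit after ~128 MB of operations
  intermediateCommitCount: 100000,            // ... or after 100,000 operations
  maxTransactionSize: 512 * 1024 * 1024,      // abort if the transaction grows beyond this
  action: function () {
    var db = require("internal").db;
    for (var i = 0; i < 1000000; i++) {
      db.docs.insert({ value: i });
    }
  }
});
```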
diff --git a/Documentation/Books/Manual/ReleaseNotes/KnownIssues33.md b/Documentation/Books/Manual/ReleaseNotes/KnownIssues33.md deleted file mode 100644 index 468e52da12d4..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/KnownIssues33.md +++ /dev/null @@ -1,16 +0,0 @@ -Known Issues in ArangoDB 3.3 -============================ - -This page lists important issues affecting the 3.3.x versions of the ArangoDB suite of products. -It is not a list of all open issues. - -Critical issues (ArangoDB Technical & Security Alerts) are also found at [arangodb.com/alerts](https://www.arangodb.com/alerts/). - -| Issue | -|------------| -| **Date Added:** 2018-12-04
**Component:** arangod
**Deployment Mode:** All
**Description:** Parallel creation of collections using multiple client connections with the same database user may spuriously fail with "Could not update user due to conflict" warnings when setting user permissions on the new collections. A follow-up effect of this may be that access to the just-created collection is denied.
**Affected Versions:** 3.3.x (all)
**Fixed in Versions:** -
**Reference:** [arangodb/arangodb#5342](https://github.com/arangodb/arangodb/issues/5342) | -| **Date Added:** 2018-11-30
**Component:** arangod
**Deployment Mode:** All
**Description:** A wrong suggestion on how to optimize an OS setting was printed in the log; if followed, it could cause ArangoDB to run into problems because the number of memory mappings would keep growing&#13;
**Affected Versions:** 3.3.0 to 3.3.19
**Fixed in Versions:** 3.3.20
**Reference:** [https://www.arangodb.com/alerts/tech03/](https://www.arangodb.com/alerts/tech03/) | -| **Date Added:** 2018-11-16
**Component:** Backup/Restore
**Deployment Mode:** All
**Description:** Users not included in the backup if _--server.authentication = true_
**Affected Versions:** 3.3.0 to 3.3.13
**Fixed in Versions:** 3.3.14
**Reference:** [https://www.arangodb.com/alerts/tech02/](https://www.arangodb.com/alerts/tech02/) | -| **Date Added:** 2018-11-03
**Component:** Security
**Deployment Mode:** All
**Description:** Unauthorized access to ArangoDB when using LDAP authentication
**Affected Versions:** 3.3.0 to 3.3.18
**Fixed in Versions:** 3.3.19
**Reference:** [https://www.arangodb.com/alerts/sec01/](https://www.arangodb.com/alerts/sec01/) | -| **Date Added:** 2018-04-09
**Component:** Storage
**Deployment Mode:** Single Instance
**Description:** Data corruption could happen under Linux
**Affected Versions:** 3.3.0
**Fixed in Versions:** 3.3.1
**Reference:** [https://www.arangodb.com/alerts/tech01/](https://www.arangodb.com/alerts/tech01/) | -| **Date Added:** 2019-02-18
**Component:** arangod
**Deployment Mode:** All
**Description:** There is a clock overflow bug within Facebook's RocksDB storage engine for Windows. The problem manifests under heavy write loads, including long imports. The Windows server will suddenly block all writes for minutes or hours, then begin working again just fine. An immediate workaround is to change the server configuration:
[rocksdb]
throttle = false
**Affected Versions:** all 3.x versions (Windows only)
**Fixed in Versions:** 3.3.23, 3.4.4
**Reference:** [facebook/rocksdb#4983](https://github.com/facebook/rocksdb/issues/4983) | diff --git a/Documentation/Books/Manual/ReleaseNotes/KnownIssues34.md b/Documentation/Books/Manual/ReleaseNotes/KnownIssues34.md deleted file mode 100644 index 07378b52520c..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/KnownIssues34.md +++ /dev/null @@ -1,41 +0,0 @@ -Known Issues in ArangoDB 3.4 -============================ - -This page lists important issues affecting the 3.4.x versions of the ArangoDB suite of products. -It is not a list of all open issues. - -Critical issues (ArangoDB Technical & Security Alerts) are also found at [arangodb.com/alerts](https://www.arangodb.com/alerts/). - -ArangoSearch ------------- - -| Issue | -|------------| -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** Score values evaluated by the corresponding score functions (BM25/TFIDF) may differ between a single server and a cluster when a collection has more than one shard&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#508](https://github.com/arangodb/backlog/issues/508) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** ArangoSearch index consolidation does not work while a link is being created on an existing collection, which may lead to massive file descriptor consumption&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#509](https://github.com/arangodb/backlog/issues/509) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** Long-running DML transactions on collections (linked with an ArangoSearch view) block the "ArangoDB flush thread", making it impossible to refresh the data "visible" to a view&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#510](https://github.com/arangodb/backlog/issues/510) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** The ArangoSearch index format introduced in 3.4.0-RC.4 is incompatible with earlier 3.4.0 release candidates. A dump and restore is needed when upgrading from 3.4.0-RC.4 to a newer 3.4.0.x release&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** N/A | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** RocksDB recovery fails sometimes after renaming a view
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#469](https://github.com/arangodb/backlog/issues/469) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** ArangoSearch ignores `_id` attribute even if `includeAllFields` is set to `true`
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#445](https://github.com/arangodb/backlog/issues/445) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** Using a loop variable in expressions within a corresponding SEARCH condition is not supported
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#318](https://github.com/arangodb/backlog/issues/318) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** Using score functions (BM25/TFIDF) in an ArangoDB expression is not supported&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#316](https://github.com/arangodb/backlog/issues/316) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** The ArangoSearch index format introduced in 3.4.0-RC.3 is incompatible with earlier 3.4.0 release candidates. A dump and restore is needed when upgrading from 3.4.0-RC.2 to a newer 3.4.0.x release&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** N/A | - - -AQL ---- - -| Issue | -|------------| -| **Date Added:** 2018-09-05
**Component:** AQL
**Deployment Mode:** Cluster
**Description:** In a very uncommon edge case there is an issue with an optimization rule in the cluster. If you are running a cluster and use a custom shard key on a collection (default is `_key`) **and** you provide a wrong shard key in a modifying query (`UPDATE`, `REPLACE`, `DELETE`) **and** the wrong shard key is on a different shard than the correct one, a `DOCUMENT NOT FOUND` error is returned instead of a modification (example query: `UPDATE { _key: "123", shardKey: "wrongKey"} WITH { foo: "bar" } IN mycollection`). Note that the modification always happens if the rule is switched off, so the suggested workaround is to [deactivate the optimizing rule](../../AQL/ExecutionAndPerformance/Optimizer.html#turning-specific-optimizer-rules-off) `restrict-to-single-shard`.
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/arangodb#6399](https://github.com/arangodb/arangodb/issues/6399) | - - -Other ------ - -| Issue | -|------------| -| **Date Added:** 2018-12-04
**Component:** arangod
**Deployment Mode:** All
**Description:** Parallel creation of collections using multiple client connections with the same database user may spuriously fail with "Could not update user due to conflict" warnings when setting user permissions on the new collections. A follow-up effect of this may be that access to the just-created collection is denied.
**Affected Versions:** 3.4.0
**Fixed in Versions:** 3.4.1
**Reference:** [arangodb/arangodb#5342](https://github.com/arangodb/arangodb/issues/5342) | -| **Date Added:** 2019-02-18
**Component:** arangod
**Deployment Mode:** All
**Description:** There is a clock overflow bug within Facebook's RocksDB storage engine for Windows. The problem manifests under heavy write loads, including long imports. The Windows server will suddenly block all writes for minutes or hours, then begin working again just fine. An immediate workaround is to change the server configuration:
[rocksdb]
throttle = false
**Affected Versions:** all 3.x versions (Windows only)
**Fixed in Versions:** 3.3.23, 3.4.4
**Reference:** [facebook/rocksdb#4983](https://github.com/facebook/rocksdb/issues/4983) | -| **Date Added:** 2019-03-13
**Component:** arangod
**Deployment Mode:** Active Failover
**Description:** A full resync is triggered after a failover, when the former leader instance is brought back online. A full resync may even occur twice sporadically.
**Affected Versions:** all 3.4.x versions
**Fixed in Versions:** 3.4.5
**Reference:** [arangodb/planning#3757](https://github.com/arangodb/planning/issues/3757) (internal) | -| **Date Added:** 2019-03-13
**Component:** arangod
**Deployment Mode:** Active Failover
**Description:** The leader instance may hang on shutdown. This behavior was observed in an otherwise successful failover.
**Affected Versions:** all 3.4.x versions
**Fixed in Versions:** -
**Reference:** [arangodb/planning#3756](https://github.com/arangodb/planning/issues/3756) (internal) | diff --git a/Documentation/Books/Manual/ReleaseNotes/KnownIssues35.md b/Documentation/Books/Manual/ReleaseNotes/KnownIssues35.md deleted file mode 100644 index 2130a64ed914..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/KnownIssues35.md +++ /dev/null @@ -1,22 +0,0 @@ -Known Issues in ArangoDB 3.5 -============================ - -This page lists important issues affecting the 3.5.x versions of the ArangoDB suite of products. -It is not a list of all open issues. - -Critical issues (ArangoDB Technical & Security Alerts) are also found at [arangodb.com/alerts](https://www.arangodb.com/alerts/). - -ArangoSearch ------------- - -| Issue | -|------------| -| **Date Added:** 2018-12-19
**Component:** ArangoSearch
**Deployment Mode:** Single-server
**Description:** Value of `_id` attribute indexed by ArangoSearch view may become inconsistent after renaming a collection
**Affected Versions:** >= 3.5.0
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#514](https://github.com/arangodb/backlog/issues/514) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** Score values evaluated by the corresponding score functions (BM25/TFIDF) may differ between a single server and a cluster when a collection has more than one shard&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#508](https://github.com/arangodb/backlog/issues/508) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** ArangoSearch index consolidation does not work while a link is being created on an existing collection, which may lead to massive file descriptor consumption&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#509](https://github.com/arangodb/backlog/issues/509) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** Cluster
**Description:** Long-running DML transactions on collections (linked with an ArangoSearch view) block the "ArangoDB flush thread", making it impossible to refresh the data "visible" to a view&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#510](https://github.com/arangodb/backlog/issues/510) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** The ArangoSearch index format introduced in 3.4.0-RC.4 is incompatible with earlier 3.4.0 release candidates. A dump and restore is needed when upgrading from 3.4.0-RC.4 to a newer 3.4.0.x release&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** N/A | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** RocksDB recovery fails sometimes after renaming a view
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#469](https://github.com/arangodb/backlog/issues/469) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** Using a loop variable in expressions within a corresponding SEARCH condition is not supported
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#318](https://github.com/arangodb/backlog/issues/318) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** Using score functions (BM25/TFIDF) in an ArangoDB expression is not supported&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** [arangodb/backlog#316](https://github.com/arangodb/backlog/issues/316) (internal) | -| **Date Added:** 2018-12-03
**Component:** ArangoSearch
**Deployment Mode:** All
**Description:** The ArangoSearch index format introduced in 3.4.0-RC.3 is incompatible with earlier 3.4.0 release candidates. A dump and restore is needed when upgrading from 3.4.0-RC.2 to a newer 3.4.0.x release&#13;
**Affected Versions:** 3.4.0-RC.5
**Fixed in Versions:** -
**Reference:** N/A | diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures21.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures21.md deleted file mode 100644 index ec40280f9e26..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures21.md +++ /dev/null @@ -1,310 +0,0 @@ -Features and Improvements in ArangoDB 2.1 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.1. ArangoDB 2.1 also contains several bugfixes that are not listed -here. - -New Edges Index ---------------- - -The edges index (used to store connections between nodes in a graph) internally -uses a new data structure. This data structure improves the performance when -populating the edge index (i.e. when loading an edge collection). For large -graphs loading can be 20 times faster than with ArangoDB 2.0. - -Additionally, the new index fixes performance problems that occurred when many -duplicate `_from` or `_to` values were contained in the index. Furthermore, the -new index supports faster removal of edges. - -Finally, when loading an existing collection and building the edges index for -the collection, less memory re-allocations will be performed. - -Overall, this should considerably speed up loading edge collections. - -The new index type replaces the old edges index type automatically, without any -changes being required by the end user. - -The API of the new index is compatible with the API of the old index. Still it -is possible that the new index returns edges in a different order than the old -index. This is still considered to be compatible because the old index had never -guaranteed any result order either. - -AQL Improvements ----------------- - -AQL offers functionality to work with dates. Dates are no data types of their own -in AQL (neither they are in JSON, which is often used as a format to ship data -into and out of ArangoDB). Instead, dates in AQL are internally represented by -either numbers (timestamps) or strings. The date functions in AQL provide -mechanisms to convert from a numeric timestamp to a string representation and -vice versa. - -There are two date functions in AQL to create dates for further use: - -- `DATE_TIMESTAMP(date)` Creates a UTC timestamp value from `date` - -- `DATE_TIMESTAMP(year, month, day, hour, minute, second, millisecond)`: - Same as before, but allows specifying the individual date components separately. - All parameters after `day` are optional. - -- `DATE_ISO8601(date)`: Returns an ISO8601 datetime string from `date`. - The datetime string will always use UTC time, indicated by the `Z` at its end. - -- `DATE_ISO8601(year, month, day, hour, minute, second, millisecond)`: - same as before, but allows specifying the individual date components separately. - All parameters after `day` are optional. - -These two above date functions accept the following input values: - -- numeric timestamps, indicating the number of milliseconds elapsed since the UNIX - epoch (i.e. January 1st 1970 00:00:00 UTC). - An example timestamp value is `1399472349522`, which translates to - `2014-05-07T14:19:09.522Z`. - -- datetime strings in formats `YYYY-MM-DDTHH:MM:SS.MMM`, `YYYY-MM-DD HH:MM:SS.MMM`, or - `YYYY-MM-DD`. Milliseconds are always optional. - - A timezone difference may optionally be added at the end of the string, with the - hours and minutes that need to be added or subtracted to the datetime value. 
- For example, `2014-05-07T14:19:09+01:00` can be used to specify a one hour offset, - and `2014-05-07T14:19:09+07:30` can be specified for seven and half hours offset. - Negative offsets are also possible. Alternatively to an offset, a `Z` can be used - to indicate UTC / Zulu time. - - An example value is `2014-05-07T14:19:09.522Z` meaning May 7th 2014, 14:19:09 and - 522 milliseconds, UTC / Zulu time. Another example value without time component is - `2014-05-07Z`. - - Please note that if no timezone offset is specified in a datestring, ArangoDB will - assume UTC time automatically. This is done to ensure portability of queries across - servers with different timezone settings, and because timestamps will always be - UTC-based. - -- individual date components as separate function arguments, in the following order: - - year - - month - - day - - hour - - minute - - second - - millisecond - - All components following `day` are optional and can be omitted. Note that no - timezone offsets can be specified when using separate date components, and UTC / - Zulu time will be used. - -The following calls to `DATE_TIMESTAMP` are equivalent and will all return -`1399472349522`: - - DATE_TIMESTAMP("2014-05-07T14:19:09.522") - DATE_TIMESTAMP("2014-05-07T14:19:09.522Z") - DATE_TIMESTAMP("2014-05-07 14:19:09.522") - DATE_TIMESTAMP("2014-05-07 14:19:09.522Z") - DATE_TIMESTAMP(2014, 5, 7, 14, 19, 9, 522) - DATE_TIMESTAMP(1399472349522) - -The same is true for calls to `DATE_ISO8601` that also accepts variable input -formats: - - DATE_ISO8601("2014-05-07T14:19:09.522Z") - DATE_ISO8601("2014-05-07 14:19:09.522Z") - DATE_ISO8601(2014, 5, 7, 14, 19, 9, 522) - DATE_ISO8601(1399472349522) - -The above functions are all equivalent and will return `"2014-05-07T14:19:09.522Z"`. - -The following date functions can be used with dates created by `DATE_TIMESTAMP` and -`DATE_ISO8601`: - -- `DATE_DAYOFWEEK(date)`: Returns the weekday number of `date`. The return values have - the following meanings: - - 0: Sunday - - 1: Monday - - 2: Tuesday - - 3: Wednesday - - 4: Thursday - - 5: Friday - - 6: Saturday - -- `DATE_YEAR(date)`: Returns the year part of `date` as a number. - -- `DATE_MONTH(date)`: Returns the month part of `date` as a number. - -- `DATE_DAY(date)`: Returns the day part of `date` as a number. - -- `DATE_HOUR(date)`: Returns the hour part of `date` as a number. - -- `DATE_MINUTE(date)`: Returns the minute part of `date` as a number. - -- `DATE_SECOND(date)`: Returns the seconds part of `date` as a number. - -- `DATE_MILLISECOND(date)`: Returns the milliseconds part of `date` as a number. - -The following other date functions are also available: - -- `DATE_NOW()`: Returns the current time as a timestamp. - - Note that this function is evaluated on every invocation and may return different - values when invoked multiple times in the same query. - -The following other AQL functions have been added in ArangoDB 2.1: - -- `FLATTEN`: this function can turn an array of sub-arrays into a single flat array. All - array elements in the original array will be expanded recursively up to a configurable - depth. The expanded values will be added to the single result array. - - Example: - - FLATTEN([ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ]) - - will expand the sub-arrays on the first level and produce: - - [ 1, 2, 3, 4, 5, 6, 7, 8, [ 9, 10 ] ] - - To fully flatten the array, the maximum depth can be specified (e.g. 
with a value of `2`): - - FLATTEN([ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ], 2) - - This will fully expand the sub-arrays and produce: - - [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] - -- `CURRENT_DATABASE`: this function will return the name of the database the current - query is executed in. - -- `CURRENT_USER`: this function returns the name of the current user that is executing - the query. If authorization is turned off or the query is executed outside of a - request context, no user is present and the function will return `null`. - -Cluster Dump and Restore ------------------------- - -The dump and restore tools, _arangodump_ and _arangorestore_, can now be used to -dump and restore collections in a cluster. Additionally, a collection dump from -a standalone ArangoDB server can be imported into a cluster, and vice versa. - -Web Interface Improvements --------------------------- - -The web interface in version 2.1 has a more compact dashboard. It provides -charts with time-series for incoming requests, HTTP transfer volume and some -server resource usage figures. - -Additionally it provides trend indicators (e.g. 15 min averages) and -distribution charts (aka histogram) for some figures. - -Foxx Improvements ------------------ - -To easily access a file inside the directory of a Foxx application from within -Foxx, Foxx's `applicationContext` now provides the `foxxFilename()` function. It -can be used to assemble the full filename of a file inside the application's -directory. The `applicationContext` can be accessed as global variable from any -module within a Foxx application. - -The filename can be used inside Foxx actions or setup / teardown scripts, -e.g. to populate a Foxx application's collection with data. - -The `require` function now also prefers local modules when used from inside a -Foxx application. This allows putting modules inside the Foxx application -directory and requiring them easily. It also allows using application-specific -versions of libraries that are bundled with ArangoDB (such as underscore.js). - -Windows Installer ------------------ - -The Windows installer shipped with ArangoDB now supports installation of -ArangoDB for the current user or all users, with the required privileges. It -also supports the installation of ArangoDB as a service. - -Fixes for 32 bit systems ------------------------- - -Several issues have been fixed that occurred only when using ArangoDB on a 32 bits -operating system, specifically: - -- a crash in a third party component used to manage cluster data - -- a third party library that failed to initialize on 32 bit Windows, making arangod - and arangosh crash immediately. - -- overflows of values used for nanosecond-precision timeouts: these overflows - have led to invalid values being passed to socket operations, making them fail - and re-try too often - -Updated drivers ---------------- - -Several drivers for ArangoDB have been checked for compatibility with 2.1. The -current list of drivers with compatibility notes can be found online -[here](https://www.arangodb.org/driver). - -C++11 usage ------------ - -We have moved several files from C to C++, allowing more code reuse and reducing -the need for shipping data between the two. We have also decided to require -C++11 support for ArangoDB, which allows us to use some of the simplifications, -features and guarantees that this standard has in stock. - -That also means a compiler with C++11 support is required to build ArangoDB from -source. For instance GNU CC of at least version 4.8. 
- -Miscellaneous Improvements --------------------------- - -- Cancelable asynchronous jobs: several potentially long-running jobs can now be - canceled via an explicit cancel operation. This allows stopping long-running - queries, traversals or scripts without shutting down the complete ArangoDB - process. Job cancelation is provided for asynchronously executed jobs as is - described in @ref HttpJobCancel. - -- Server-side periodic task management: an ArangoDB server now provides - functionality to register and unregister periodic tasks. Tasks are - user-defined JavaScript actions that can be run periodically and - automatically, independent of any HTTP requests. - - The following task management functions are provided: - - - require("org/arangodb/tasks").register(): registers a periodic task - - require("org/arangodb/tasks").unregister(): unregisters and removes a periodic task - - require("org/arangodb/tasks").get(): retrieves a specific tasks or all existing tasks - - An example task (to be executed every 15 seconds) can be registered like this: - - var tasks = require("org/arangodb/tasks"); - tasks.register({ - name: "this is an example task with parameters", - period: 15, - command: function (params) { - var greeting = params.greeting; - var data = JSON.stringify(params.data); - require('console').log('%s from parameter task: %s', greeting, data); - }, - params: { greeting: "hi", data: "how are you?" } - }); - - Please refer to the section @ref Tasks for more details. - -- The `figures` method of a collection now returns data about the collection's - index memory consumption. The returned value `indexes.size` will contain the - total amount of memory acquired by all indexes of the collection. This figure - can be used to assess the memory impact of indexes. - -- Capitalized HTTP response headers: from version 2.1, ArangoDB will return - capitalized HTTP headers by default, e.g. `Content-Length` instead of - `content-length`. Though the HTTP specification states that headers field - name are case-insensitive, several older client tools rely on a specific case - in HTTP response headers. This changes make ArangoDB a bit more compatible - with those. - -- Simplified usage of `db._createStatement()`: to easily run an AQL query, the - method `db._createStatement` now allows passing the AQL query as a string. - Previously it required the user to pass an object with a `query` attribute - (which then contained the query string). - - ArangoDB now supports both versions: - - db._createStatement(queryString); - db._createStatement({ query: queryString }); diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures22.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures22.md deleted file mode 100644 index ac26eaef6daa..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures22.md +++ /dev/null @@ -1,268 +0,0 @@ -Features and Improvements in ArangoDB 2.2 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.2. ArangoDB 2.2 also contains several bugfixes that are not listed -here. - -AQL improvements ----------------- - -### Data modification AQL queries - -Up to including version 2.1, AQL supported data retrieval operations only. 
-Starting with ArangoDB version 2.2, AQL also supports the following -data modification operations: - -- INSERT: insert new documents into a collection -- UPDATE: partially update existing documents in a collection -- REPLACE: completely replace existing documents in a collection -- REMOVE: remove existing documents from a collection - -Data-modification operations are normally combined with other AQL -statements such as *FOR* loops and *FILTER* conditions to determine -the set of documents to operate on. For example, the following query -will find all documents in collection *users* that match a specific -condition and set their *status* variable to *inactive*: - - FOR u IN users - FILTER u.status == 'not active' - UPDATE u WITH { status: 'inactive' } IN users - -The following query copies all documents from collection *users* into -collection *backup*: - - FOR u IN users - INSERT u IN backup - -And this query removes documents from collection *backup*: - - FOR doc IN backup - FILTER doc.lastModified < DATE_NOW() - 3600 - REMOVE doc IN backup - -For more information on data-modification queries, please refer to -[Data modification queries](../../AQL/DataQueries.html#data-modification-queries). - -### Updatable variables - -Previously, the value of a variable assigned in an AQL query with the `LET` keyword -was not updatable in an AQL query. This prevented statements like the following from -being executable: - - LET sum = 0 - FOR v IN values - SORT v.year - LET sum = sum + v.value - RETURN { year: v.year, value: v.value, sum: sum } - -### Other AQL improvements - -* added AQL TRANSLATE function - - This function can be used to perform lookups from static objects, e.g. - - LET countryNames = { US: "United States", UK: "United Kingdom", FR: "France" } - RETURN TRANSLATE("FR", countryNames) - - LET lookup = { foo: "foo-replacement", bar: "bar-replacement", baz: "baz-replacement" } - RETURN TRANSLATE("foobar", lookup, "not contained!") - - -Write-ahead log ---------------- - -All write operations in an ArangoDB server will now be automatically logged -in the server's write-ahead log. The write-ahead log is a set of append-only -logfiles, and it is used in case of a crash recovery and for replication. - -Data from the write-ahead log will eventually be moved into the journals or -datafiles of collections, allowing the server to remove older write-ahead logfiles. - -Cross-collection transactions in ArangoDB should benefit considerably by this -change, as less writes than in previous versions are required to ensure the data -of multiple collections are atomically and durably committed. All data-modifying -operations inside transactions (insert, update, remove) will write their -operations into the write-ahead log directly now. In previous versions, such -operations were buffered until the commit or rollback occurred. Transactions with -multiple operations should therefore require less physical memory than in previous -versions of ArangoDB. - -The data in the write-ahead log can also be used in the replication context. In -previous versions of ArangoDB, replicating from a master required turning on a -special replication logger on the master. The replication logger caused an extra -write operation into the *_replication* system collection for each actual write -operation. This extra write is now superfluous. Instead, slaves can read directly -from the master's write-ahead log to get informed about most recent data changes. 
-This removes the need to store data-modification operations in the *_replication* -collection altogether. - -For the configuration of the write-ahead log, please refer to -[Write-ahead log options](../Programs/Arangod/Wal.md). - -The introduction of the write-ahead log also removes the need to configure and -start the replication logger on a master. Though the replication logger object -is still available in ArangoDB 2.2 to ensure API compatibility, starting, stopping, -or configuring it will have no effect. - - -Performance improvements ------------------------- - -* Removed sorting of attribute names when in collection shaper - - In previous versions of ArangoDB, adding a document with previously not-used - attribute names caused a full sort of all attribute names used in the - collection. The sorting was done to ensure fast comparisons of attribute - names in some rare edge cases, but it considerably slowed down inserts into - collections with many different or even unique attribute names. - -* Specialized primary index implementation to allow faster hash table - rebuilding and reduce lookups in datafiles for the actual value of `_key`. - This also reduces the amount of random memory accesses for primary index inserts. - -* Reclamation of index memory when deleting last document in collection - - Deleting documents from a collection did not lead to index sizes being reduced. - Instead, the index memory was kept allocated and re-used later when a collection - was refilled with new documents. Now, index memory of primary indexes and hash - indexes is reclaimed instantly when the last document in a collection is removed. - -* Prevent buffering of long print results in arangosh's and arangod's print - command - - This change will emit buffered intermediate print results and discard the - output buffer to quickly deliver print results to the user, and to prevent - constructing very large buffers for large results. - - -Miscellaneous improvements --------------------------- - -* Added `insert` method as an alias for `save`. Documents can now be inserted into - a collection using either method: - - db.test.save({ foo: "bar" }); - db.test.insert({ foo: "bar" }); - -* Cleanup of options for data-modification operations - - Many of the data-modification operations had signatures with many optional - bool parameters, e.g.: - - db.test.update("foo", { bar: "baz" }, true, true, true) - db.test.replace("foo", { bar: "baz" }, true, true) - db.test.remove("foo", true, true) - db.test.save({ bar: "baz" }, true) - - Such long parameter lists were unintuitive and hard to use when only one of - the optional parameters should have been set. - - To make the APIs more usable, the operations now understand the following - alternative signature: - - collection.update(key, update-document, options) - collection.replace(key, replacement-document, options) - collection.remove(key, options) - collection.save(document, options) - - Examples: - - db.test.update("foo", { bar: "baz" }, { overwrite: true, keepNull: true, waitForSync: true }) - db.test.replace("foo", { bar: "baz" }, { overwrite: true, waitForSync: true }) - db.test.remove("foo", { overwrite: true, waitForSync: true }) - db.test.save({ bar: "baz" }, { waitForSync: true }) - -* Added `--overwrite` option to arangoimp - - This allows removing all documents in a collection before importing into it - using arangoimp. 
- -* Honor startup option `--server.disable-statistics` when deciding whether or not - to start periodic statistics collection jobs - - Previously, the statistics collection jobs were started even if the server was - started with the `--server.disable-statistics` flag being set to `true`. Now if - the option is set to `true`, no statistics will be collected on the server. - -* Disallow storing of JavaScript objects that contain JavaScript native objects - of type `Date`, `Function`, `RegExp` or `External`, e.g. - - db.test.save({ foo: /bar/ }); - db.test.save({ foo: new Date() }); - - This will now print - - Error: cannot be converted into JSON shape: could not shape document - - Previously, objects of these types were silently converted into an empty object - (i.e. `{ }`) and no warning was issued. - - To store such objects in a collection, explicitly convert them into strings - like this: - - db.test.save({ foo: String(/bar/) }); - db.test.save({ foo: String(new Date()) }); - - -Removed features ----------------- - -### MRuby integration for arangod - -ArangoDB had an experimental MRuby integration in some of the publish builds. -This wasn't continuously developed, and so it has been removed in ArangoDB 2.2. - -This change has led to the following startup options being superfluous: - -- `--ruby.gc-interval` -- `--ruby.action-directory` -- `--ruby.modules-path` -- `--ruby.startup-directory` - -Specifying these startup options will do nothing in ArangoDB 2.2, so using these -options should be avoided from now on as they might be removed in a future version -of ArangoDB. - -### Removed startup options - -The following startup options have been removed in ArangoDB 2.2. Specifying them -in the server's configuration file will not produce an error to make migration -easier. Still, usage of these options should be avoided as they will not have any -effect and might fully be removed in a future version of ArangoDB: - -- `--database.remove-on-drop` -- `--database.force-sync-properties` -- `--random.no-seed` -- `--ruby.gc-interval` -- `--ruby.action-directory` -- `--ruby.modules-path` -- `--ruby.startup-directory` -- `--server.disable-replication-logger` - - - -Multi Collection Graphs ------------------------ -ArangoDB is a multi model database with native graph support. -In version 2.2 the features for graphs have been improved by integration of a new graph module. -All graphs created with the old module are automatically migrated into the new module but can still be used by the old module. - -### New graph module -Up to including version 2.1, ArangoDB offered a module for graphs and graph operations. -This module allowed you to use exactly one edge collection together with one vertex collection in a graph. -With ArangoDB version 2.2 this graph module is deprecated and a new graph module is offered. -This new module allows to combine an arbitrary number of vertex collections and edge collections in the same graph. -For each edge collection a list of collections containing source vertices and a list of collections containing target vertices can be defined. -If an edge is stored ArangoDB checks if this edge is valid in this collection. -Furthermore if a vertex is removed from one of the collections all connected edges will be removed as well, giving the guarantee of no loose ends in the graphs. -The layout of the graph can be modified at runtime by adding or removing collections and changing the definitions for edge collections. -All operations on the graph level are transactional by default. 
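For illustration, a minimal arangosh sketch of such a multi-collection graph follows. The module path and the `_create`/`_relation` helpers are taken from later manuals and may differ slightly in 2.2; the graph and collection names are invented.

```js
// Sketch: one edge collection combining two vertex collections.
var graphModule = require("org/arangodb/general-graph");

var graph = graphModule._create("social", [
  // edges in "knows" may start in "users" or "companies" and must end in "users"
  graphModule._relation("knows", [ "users", "companies" ], [ "users" ])
]);

// vertices and edges are validated against the definition above;
// removing a vertex also removes its connected edges
graph.users.save({ _key: "alice" });
graph.users.save({ _key: "bob" });
graph.knows.save("users/alice", "users/bob", { since: 2014 });
```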
- -### Graphs in AQL -Multi collection graphs have been added to AQL as well. -Basic functionality (getting vertices, edges, neighbors) can be executed using the entire graph. -Also more advanced features like shortest path calculations, characteristic factors of the graph or traversals have been integrated into AQL. -For these functions all graphs created with the graph module can be used. - - diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures23.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures23.md deleted file mode 100644 index 1001ac26ac1f..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures23.md +++ /dev/null @@ -1,308 +0,0 @@ -Features and Improvements in ArangoDB 2.3 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.3. ArangoDB 2.3 also contains several bugfixes that are not listed -here. - -AQL improvements ----------------- - -### Framework improvements - -AQL queries are now sent through a query optimizer framework before execution. -The query optimizer framework will first convert the internal representation of -the query, the abstract syntax tree, into an initial execution plan. - -The execution plan is then send through optimizer rules that may directly modify -the plan in place or create a new variant of the plan. New plans might again be -optimized, allowing the optimizer to carry out several optimizations. - -After creating plans, the optimizer will estimate the costs for each plan and -pick the plan with the lowest cost (termed the *optimal plan*) for the actual -query execution. - -With the `explain()` method of `ArangoStatement` users can check which execution -plan the optimizer pick or retrieve a list of other plans that optimizer did not -choose. The plan will reveal many details about which indexes are used etc. -`explain()` will also return the of optimizer rules applied so users can validate -whether or not a query allows using a specific optimization. - -Execution of AQL queries has been rewritten in C++, allowing many queries -to avoid the conversion of documents between ArangoDB's internal low-level data -structure and the V8 object representation format. - -The framework for optimizer rules is now also generally cluster-aware, allowing -specific optimizations for queries that run in a cluster. Additionally, the -optimizer was designed to be extensible in order to add more optimizations -in the future. - - -### Language improvements - -#### Alternative operator syntax - -ArangoDB 2.3 allows to use the following alternative forms for the -logical operators: -- `AND`: logical and -- `OR`: logical or -- `NOT`: negation - -This new syntax is just an alternative to the old syntax, allowing easier -migration from SQL. The old syntax is still fully supported and will be: -- `&&`: logical and -- `||`: logical or -- `!`: negation - - -#### `NOT IN` operator -AQL now has a dedicated `NOT IN` operator. - -Previously, a `NOT IN` was only achievable by writing a negated `IN` condition: - - FOR i IN ... FILTER ! (i IN [ 23, 42 ]) ... - -In ArangoDB 2.3, the same result can now alternatively be achieved by writing -the more intuitive variant: - - FOR i IN ... FILTER i NOT IN [ 23, 42 ] ... 
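As a small combined illustration of the keyword operators and `NOT IN`, run from arangosh (the `users` collection and its attributes are invented for this sketch):

```js
// Both filters express the same condition; the second uses the dedicated NOT IN operator.
db._query(
  "FOR u IN users " +
  "  FILTER u.active == true AND NOT (u.age IN [ 23, 42 ]) " +
  "  RETURN u.name"
).toArray();

db._query(
  "FOR u IN users " +
  "  FILTER u.active == true AND u.age NOT IN [ 23, 42 ] " +
  "  RETURN u.name"
).toArray();
```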
- - -#### Improvements of built-in functions - -The following AQL string functions have been added: - -- `LTRIM(value, characters)`: left-trims a string value -- `RTRIM(value, characters)`: right-trims a string value -- `FIND_FIRST(value, search, start, end)`: finds the first occurrence - of a search string -- `FIND_LAST(value, search, start, end)`: finds the last occurrence of a - search string -- `SPLIT(value, separator, limit) `: splits a string into an array, - using a separator -- `SUBSTITUTE(value, search, replace, limit)`: replaces characters - or strings inside another - -The following other AQL functions have been added: - -- `VALUES(document)`: returns the values of an object as an array (this is - the counterpart to the already existing `ATTRIBUTES` function) -- `ZIP(attributes, values)`: returns an object constructed from attributes - and values passed in separate parameters -- `PERCENTILE(values, n, method)`: returns the nths percentile of the - values provided, using rank or interpolation method - -The already existing functions `CONCAT` and `CONCAT_SEPARATOR` now support -array arguments, e.g.: - - /* "foobarbaz" */ - CONCAT([ 'foo', 'bar', 'baz']) - - /* "foo,bar,baz" */ - CONCAT_SEPARATOR(", ", [ 'foo', 'bar', 'baz']) - - -#### AQL queries throw less exceptions - -In previous versions of ArangoDB, AQL queries aborted with an exception in many -situations and threw a runtime exception. For example, exceptions were thrown when -trying to find a value using the `IN` operator in a non-array element, when trying -to use non-boolean values with the logical operands `&&` or `||` or `!`, when using -non-numeric values in arithmetic operations, when passing wrong parameters into -functions etc. - -The fact that many AQL operators could throw exceptions led to a lot of questions -from users, and a lot of more-verbose-than-necessary queries. For example, the -following query failed when there were documents that did not have a `topics` -attribute at all: - - FOR doc IN mycollection - FILTER doc.topics IN [ "something", "whatever" ] - RETURN doc - -This forced users to rewrite the query as follows: - - FOR doc IN mycollection - FILTER IS_LIST(doc.topics) && doc.topics IN [ "something", "whatever" ] - RETURN doc - -In ArangoDB 2.3 this has been changed to make AQL easier to use. The change -provides an extra benefit, and that is that non-throwing operators allow the -query optimizer to perform much more transformations in the query without -changing its overall result. - -Here is a summary of changes: -- when a non-array value is used on the right-hand side of the `IN` operator, the - result will be `false` in ArangoDB 2.3, and no exception will be thrown. -- the boolean operators `&&` and `||` do not throw in ArangoDB 2.3 if any of the - operands is not a boolean value. Instead, they will perform an implicit cast of - the values to booleans. Their result will be as follows: - - `lhs && rhs` will return `lhs` if it is `false` or would be `false` when converted - into a boolean. If `lhs` is `true` or would be `true` when converted to a boolean, - `rhs` will be returned. - - `lhs || rhs` will return `lhs` if it is `true` or would be `true` when converted - into a boolean. If `lhs` is `false` or would be `false` when converted to a boolean, - `rhs` will be returned. - - `! 
value` will return the negated value of `value` converted into a boolean -- the arithmetic operators (`+`, `-`, `*`, `/`, `%`) can be applied to any value and - will not throw exceptions when applied to non-numeric values. Instead, any value used - in these operators will be casted to a numeric value implicitly. If no numeric result - can be produced by an arithmetic operator, it will return `null` in ArangoDB 2.3. This - is also true for division by zero. -- passing arguments of invalid types into AQL functions does not throw a runtime - exception in most cases, but may produce runtime warnings. Built-in AQL functions that - receive invalid arguments will then return `null`. - - -Performance improvements ------------------------- - -### Non-unique hash indexes - -The performance of insertion into *non-unique* hash indexes has been improved -significantly. This fixes performance problems in case attributes were indexes -that contained only very few distinct values, or when most of the documents -did not even contain the indexed attribute. This also fixes problems when -loading collections with such indexes. - -The insertion time now scales linearly with the number of documents regardless -of the cardinality of the indexed attribute. - - -### Reverse iteration over skiplist indexes - -AQL queries can now use a sorted skiplist index for reverse iteration. This -allows several queries to run faster than in previous versions of ArangoDB. - -For example, the following AQL query can now use the index on `doc.value`: - - FOR doc IN mycollection - FILTER doc.value > 23 - SORT doc.values DESC - RETURN doc - -Previous versions of ArangoDB did not use the index because of the descending -(`DESC`) sort. - -Additionally, the new AQL optimizer can use an index for sorting now even -if the AQL query does not contain a `FILTER` statement. This optimization was -not available in previous versions of ArangoDB. - - -### Added basic support for handling binary data in Foxx - -Buffer objects can now be used when setting the response body of any Foxx action. -This allows Foxx actions to return binary data. - -Requests with binary payload can be processed in Foxx applications by -using the new method `res.rawBodyBuffer()`. This will return the unparsed request -body as a Buffer object. - -There is now also the method `req.requestParts()` available in Foxx to retrieve -the individual components of a multipart HTTP request. That can be used for example -to process file uploads. - - -Additionally, the `res.send()` method has been added as a convenience method for -returning strings, JSON objects or Buffers from a Foxx action. It provides some -auto-detection based on its parameter value: - -```js -res.send("

some HTML

"); // returns an HTML string -res.send({ success: true }); // returns a JSON object -res.send(new Buffer("some binary data")); // returns binary data -``` - -The convenience method `res.sendFile()` can now be used to return the contents of -a file from a Foxx action. They file may contain binary data: - -```js -res.sendFile(applicationContext.foxxFilename("image.png")); -``` - -The filesystem methods `fs.write()` and `fs.readBuffer()` can be used to work -with binary data, too: - -`fs.write()` will perform an auto-detection of its second parameter's value so it -works with Buffer objects: - -```js -fs.write(filename, "some data"); // saves a string value in file -fs.write(filename, new Buffer("some binary data")); // saves (binary) contents of a buffer -``` - -`fs.readBuffer()` has been added as a method to read the contents of an -arbitrary file into a Buffer object. - - -### Web interface - -Batch document removal and move functionality has been added to the web interface, -making it easier to work with multiple documents at once. Additionally, basic -JSON import and export tools have been added. - - -### Command-line options added - -The command-line option `--javascript.v8-contexts` was added to arangod to -provide better control over the number of V8 contexts created in arangod. - -Previously, the number of V8 contexts arangod created at startup was equal -to the number of server threads (as specified by option `--server.threads`). - -In some situations it may be more sensible to create different amounts of threads -and V8 contexts. This is because each V8 contexts created will consume memory -and requires CPU resources for periodic garbage collection. Contrary, server -threads do not have such high memory or CPU footprint. - -If the option `--javascript.v8-contexts` is not specified, the number of V8 -contexts created at startup will remain equal to the number of server threads. -Thus no change in configuration is required to keep the same behavior as in -previous ArangoDB versions. - - -The command-line option `--log.use-local-time` was added to print dates and -times in ArangoDB's log in the server-local timezone instead of UTC. If it -is not set, the timezone will default to UTC. - - -The option `--backslash-escape` has been added to arangoimp. Specifying this -option will use the backslash as the escape character for literal quotes when -parsing CSV files. The escape character for literal quotes is still the -double quote character. - - -Miscellaneous improvements --------------------------- - -ArangoDB's built-in HTTP server now supports HTTP pipelining. - -The ArangoShell tutorial from the arangodb.com website is now integrated into -the ArangoDB shell. - -Powerful Foxx Enhancements --------------------------- - -With the new **job queue** feature you can run async jobs to communicate with external services, **Foxx queries** make writing complex AQL queries much easier and **Foxx sessions** will handle the authentication and session hassle for you. - -### Foxx Queries - -Writing long AQL queries in JavaScript can quickly become unwieldy. As of 2.3 ArangoDB bundles the [ArangoDB Query Builder](https://npmjs.org/package/aqb) module that provides a JavaScript API for writing complex AQL queries without string concatenation. All built-in functions that accept AQL strings now support query builder instances directly. 
Additionally Foxx provides a method `Foxx.createQuery` for creating parametrized queries that can return Foxx models or apply arbitrary transformations to the query results. - -### Foxx Sessions - -The session functionality in Foxx has been completely rewritten. The old `activateAuthentication` API is still supported but may be deprecated in the future. The new `activateSessions` API supports cookies or configurable headers, provides optional JSON Web Token and cryptographic signing support and uses the new sessions Foxx app. - -ArangoDB 2.3 provides Foxx apps for user management and salted hash-based authentication which can be replaced with or supplemented by alternative implementations. For an example app using both the built-in authentication and OAuth2 see the [Foxx Sessions Example app](https://github.com/arangodb/foxx-sessions-example). - -### Foxx Queues - -Foxx now provides async workers via the Foxx Queues API. Jobs enqueued in a job queue will be executed asynchronously outside of the request/response cycle of Foxx controllers and can be used to communicate with external services or perform tasks that take a long time to complete or may require multiple attempts. - -Jobs can be scheduled in advance or set to be executed immediately, the number of retry attempts, the retry delay as well as success and failure handlers can be defined for each job individually. Job types that integrate various external services for transactional e-mails, logging and user tracking can be found in the Foxx app registry. - -### Misc - -The request and response objects in Foxx controllers now provide methods for reading and writing raw cookies and signed cookies. - -Mounted Foxx apps will now be loaded when arangod starts rather than at the first database request. This may result in slightly slower start up times (but a faster response for the first request). diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md deleted file mode 100644 index c21259f0336c..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md +++ /dev/null @@ -1,293 +0,0 @@ -Features and Improvements in ArangoDB 2.4 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.4. ArangoDB 2.4 also contains several bugfixes that are not listed -here. For a list of bugfixes, please consult the [CHANGELOG](https://github.com/arangodb/arangodb/blob/devel/CHANGELOG). - - -V8 version upgrade ------------------- - -The built-in version of V8 has been upgraded from 3.16.14 to 3.29.59. -This activates several ES6 (also dubbed *Harmony* or *ES.next*) features in -ArangoDB, both in the ArangoShell and the ArangoDB server. They can be -used for scripting and in server-side actions such as Foxx routes, traversals -etc. - -The following ES6 features are available in ArangoDB 2.4 by default: - -* iterators -* the `of` operator -* symbols -* predefined collections types (Map, Set etc.) 
-* typed arrays - -Many other ES6 features are disabled by default, but can be made available by -starting arangod or arangosh with the appropriate options: - -* arrow functions -* proxies -* generators -* String, Array, and Number enhancements -* constants -* enhanced object and numeric literals - -To activate all these ES6 features in arangod or arangosh, start it with -the following options: - - arangosh --javascript.v8-options="--harmony --harmony_generators" - -More details on the available ES6 features can be found in -[this blog](https://jsteemann.github.io/blog/2014/12/19/using-es6-features-in-arangodb/). - - -FoxxGenerator -------------- - -ArangoDB 2.4 is shipped with FoxxGenerator, a framework for building -standardized Hypermedia APIs easily. The generated APIs can be consumed with -client tools that understand [Siren](https://github.com/kevinswiber/siren). - -Hypermedia is the simple idea that our HTTP APIs should have links between their -endpoints in the same way that our web sites have links between -them. FoxxGenerator is based on the idea that you can represent an API as a -statechart: Every endpoint is a state and the links are the transitions between -them. Using your description of states and transitions, it can then create an -API for you. - -The FoxxGenerator can create APIs based on a semantic description of entities -and transitions. A blog series on the use cases and how to use the Foxx generator -is here: - -* [part 1](https://www.arangodb.com/2014/11/26/building-hypermedia-api-json) -* [part 2](https://www.arangodb.com/2014/12/02/building-hypermedia-apis-design) -* [part 3](https://www.arangodb.com/2014/12/08/building-hypermedia-apis-foxxgenerator) - -A cookbook recipe for getting started with FoxxGenerator is [here](https://docs.arangodb.com/2.8/Cookbook/FoxxGeneratorFirstSteps.html). - -AQL improvements ----------------- - -### Optimizer improvements - -The AQL optimizer has been enhanced to use of indexes in queries in several -additional cases. Filters containing the `IN` operator can now make use of -indexes, and multiple OR- or AND-combined filter conditions can now also use -indexes if the filter conditions refer to the same indexed attribute. - -Here are a few examples of queries that can now use indexes but couldn't before: - - FOR doc IN collection - FILTER doc.indexedAttribute == 1 || doc.indexedAttribute > 99 - RETURN doc - - FOR doc IN collection - FILTER doc.indexedAttribute IN [ 3, 42 ] || doc.indexedAttribute > 99 - RETURN doc - - FOR doc IN collection - FILTER (doc.indexedAttribute > 2 && doc.indexedAttribute < 10) || - (doc.indexedAttribute > 23 && doc.indexedAttribute < 42) - RETURN doc - - -Additionally, the optimizer rule `remove-filter-covered-by-index` has been -added. This rule removes FilterNodes and CalculationNodes from an execution -plan if the filter condition is already covered by a previous IndexRangeNode. -Removing the filter's CalculationNode and the FilterNode itself will speed -up query execution because the query requires less computation. - -Furthermore, the new optimizer rule `remove-sort-rand` will remove a `SORT RAND()` -statement and move the random iteration into the appropriate `EnumerateCollectionNode`. -This is usually more efficient than individually enumerating and sorting. - - -### Data-modification queries returning documents - -`INSERT`, `REMOVE`, `UPDATE` or `REPLACE` queries now can optionally return -the documents inserted, removed, updated, or replaced. 
This is helpful for tracking -the auto-generated attributes (e.g. `_key`, `_rev`) created by an `INSERT` and in -a lot of other situations. - -In order to return documents from a data-modification query, the statement must -be immediately followed by a `LET` statement that assigns either the -pseudo-value `NEW` or `OLD` to a variable. This `LET` statement must be followed -by a `RETURN` statement that returns the variable introduced by `LET`: - - FOR i IN 1..100 - INSERT { value: i } IN test LET inserted = NEW RETURN inserted - - FOR u IN users - FILTER u.status == 'deleted' - REMOVE u IN users LET removed = OLD RETURN removed - - FOR u IN users - FILTER u.status == 'not active' - UPDATE u WITH { status: 'inactive' } IN users LET updated = NEW RETURN updated - -`NEW` refers to the inserted or modified document revision, and `OLD` refers -to the document revision before update or removal. `INSERT` statements can -only refer to the `NEW` pseudo-value, and `REMOVE` operations only to `OLD`. -`UPDATE` and `REPLACE` can refer to either. - -In all cases the full documents will be returned with all their attributes, -including the potentially auto-generated attributes such as `_id`, `_key`, or `_rev` -and the attributes not specified in the update expression of a partial update. - - -### Language improvements - -#### `COUNT` clause - -An optional `COUNT` clause was added to the `COLLECT` statement. The `COUNT` -clause allows for more efficient counting of values. - -In previous versions of ArangoDB one had to write the following to count -documents: - - RETURN LENGTH ( - FOR doc IN collection - FILTER ...some condition... - RETURN doc - ) - -With the `COUNT` clause, the query can be modified to - - FOR doc IN collection - FILTER ...some condition... - COLLECT WITH COUNT INTO length - RETURN length - -The latter query will be much more efficient because it will not produce any -intermediate results which need to be shipped from a subquery into the `LENGTH` -function. - -The `COUNT` clause can also be used to count the number of items in each group: - - FOR doc IN collection - FILTER ...some condition... - COLLECT group = doc.group WITH COUNT INTO length - return { group: group, length: length } - - -#### `COLLECT` modifications - -In ArangoDB 2.4, `COLLECT` operations can be made more efficient if only a -small fragment of the group values is needed later. For these cases, `COLLECT` -provides an optional conversion expression for the `INTO` clause. This -expression controls the value that is inserted into the array of group values. -It can be used for projections. - -The following query only copies the `dateRegistered` attribute of each document -into the groups, potentially saving a lot of memory and computation time -compared to copying `doc` completely: - - FOR doc IN collection - FILTER ...some condition... - COLLECT group = doc.group INTO dates = doc.dateRegistered - return { group: group, maxDate: MAX(dates) } - -Compare this to the following variant of the query, which was the only way -to achieve the same result in previous versions of ArangoDB: - - FOR doc IN collection - FILTER ...some condition... - COLLECT group = doc.group INTO dates - return { group: group, maxDate: MAX(dates[*].doc.dateRegistered) } - -The above query will need to copy the full `doc` attribute into the `dates` -variable, whereas the new variant will only copy the `dateRegistered` -attribute of each `doc`.
- - -#### Subquery syntax - -In previous versions of ArangoDB, subqueries required extra parentheses -around them, and this caused confusion when subqueries were used as function -parameters. For example, the following query did not work: - - LET values = LENGTH( - FOR doc IN collection RETURN doc - ) - -but had to be written as follows: - - LET values = LENGTH(( - FOR doc IN collection RETURN doc - )) - -This was unintuitive and is fixed in version 2.4 so that both variants of -the query are accepted and produce the same result. - - -### Web interface - -The `Applications` tab for Foxx applications in the web interface has received -a complete redesign. - -It will now only show applications that are currently running in ArangoDB. -For a selected application, a new detailed view has been created. This view -provides a better overview of the app, e.g.: - -* author -* license -* version -* contributors -* download links -* API documentation - -Installing a new Foxx application on the server is made easy using the new -`Add application` button. The `Add application` dialog provides all the -features already available in the `foxx-manager` console application plus some more: - -* install a Foxx application from GitHub -* install a Foxx application from a zip file -* install a Foxx application from ArangoDB's application store -* create a new Foxx application from scratch: this feature uses a generator to - create a Foxx application with pre-defined CRUD methods for a given list of collections. - The generated Foxx app can either be downloaded as a zip file or - be installed on the server. Starting with a new Foxx app has never been easier. - - -Miscellaneous improvements --------------------------- - -### Default endpoint is 127.0.0.1 - -The default endpoint for the ArangoDB server has been changed from `0.0.0.0` to -`127.0.0.1`. This will make new ArangoDB installations inaccessible from clients other -than localhost unless the configuration is changed. This is a security precaution -that has frequently been requested as a feature. - -If you are using the development option `--enable-relative`, the endpoint will still -be `0.0.0.0`. - - -### System collections in replication - -By default, system collections are now included in replication and all replication API -return values. This will lead to user accounts and credentials data being replicated -from master to slave servers. This may overwrite slave-specific database users. - -If this is undesired, the `_users` collection can be excluded from replication -easily by setting the `includeSystem` attribute to `false` in the following commands: - -* replication.sync({ includeSystem: false }); -* replication.applier.properties({ includeSystem: false }); - -This will exclude all system collections (including `_aqlfunctions`, `_graphs` etc.) -from the initial synchronization and the continuous replication.
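In arangosh, these calls could look as follows (a minimal sketch; the endpoint value is a hypothetical placeholder):

```js
var replication = require("org/arangodb/replication");

// initial synchronization from a master, excluding system collections
replication.sync({
  endpoint: "tcp://127.0.0.1:8529",  // hypothetical master endpoint
  includeSystem: false
});

// continuous replication without system collections
replication.applier.properties({ includeSystem: false });
```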
- -If this is also undesired, it is also possible to specify a list of collections to -exclude from the initial synchronization and the continuous replication using the -`restrictCollections` attribute, e.g.: - -```js -require("org/arangodb/replication").applier.properties({ - includeSystem: true, - restrictType: "exclude", - restrictCollections: [ "_users", "_graphs", "foo" ] -}); -``` - diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures25.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures25.md deleted file mode 100644 index 4b6b37aa8507..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures25.md +++ /dev/null @@ -1,310 +0,0 @@ -Features and Improvements in ArangoDB 2.5 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.5. ArangoDB 2.5 also contains several bugfixes that are not listed -here. For a list of bugfixes, please consult the [CHANGELOG](https://github.com/arangodb/arangodb/blob/devel/CHANGELOG). - - -V8 version upgrade ------------------- - -The built-in version of V8 has been upgraded from 3.29.54 to 3.31.74.1. -This allows activating additional ES6 (also dubbed *Harmony* or *ES.next*) features -in ArangoDB, both in the ArangoShell and the ArangoDB server. They can be -used for scripting and in server-side actions such as Foxx routes, traversals -etc. - -The following additional ES6 features become available in ArangoDB 2.5 by default: - -* iterators and generators -* template strings -* enhanced object literals -* enhanced numeric literals -* block scoping with `let` and constant variables using `const` (note: constant - variables require using strict mode, too) -* additional string methods (such as `startsWith`, `repeat` etc.) - - -Index improvements ------------------- - -### Sparse hash and skiplist indexes - -Hash and skiplist indexes can optionally be made sparse. Sparse indexes exclude documents -in which at least one of the index attributes is either not set or has a value of `null`. - -As such documents are excluded from sparse indexes, they may contain fewer documents than -their non-sparse counterparts. This enables faster indexing and can lead to reduced memory -usage in case the indexed attribute does occur only in some, but not all documents of the -collection. Sparse indexes will also reduce the number of collisions in non-unique hash -indexes in case non-existing or optional attributes are indexed. 
- -In order to create a sparse index, an object with the attribute `sparse` can be added to -the index creation commands: - -```js -db.collection.ensureHashIndex(attributeName, { sparse: true }); -db.collection.ensureHashIndex(attributeName1, attributeName2, { sparse: true }); -db.collection.ensureUniqueConstraint(attributeName, { sparse: true }); -db.collection.ensureUniqueConstraint(attributeName1, attributeName2, { sparse: true }); - -db.collection.ensureSkiplist(attributeName, { sparse: true }); -db.collection.ensureSkiplist(attributeName1, attributeName2, { sparse: true }); -db.collection.ensureUniqueSkiplist(attributeName, { sparse: true }); -db.collection.ensureUniqueSkiplist(attributeName1, attributeName2, { sparse: true }); -``` - -Note that in place of the above specialized index creation commands, it is recommended to use -the more general index creation command `ensureIndex`: - -```js -db.collection.ensureIndex({ type: "hash", sparse: true, unique: true, fields: [ attributeName ] }); -db.collection.ensureIndex({ type: "skiplist", sparse: false, unique: false, fields: [ "a", "b" ] }); -``` - -When not explicitly set, the `sparse` attribute defaults to `false` for new hash or -skiplist indexes. - -This causes a change in behavior when creating a unique hash index without specifying the -sparse flag: in 2.4, unique hash indexes were implicitly sparse, always excluding `null` values. -There was no option to control this behavior, and sparsity was neither supported for non-unique -hash indexes nor skiplists in 2.4. This implicit sparsity of unique hash indexes was considered -an inconsistency, and therefore the behavior was cleaned up in 2.5. As of 2.5, indexes will -only be created sparse if sparsity is explicitly requested. Existing unique hash indexes from 2.4 -or before will automatically be migrated so they are still sparse after the upgrade to 2.5. - -Geo indexes are implicitly sparse, meaning documents without the indexed location attribute or -containing invalid location coordinate values will be excluded from the index automatically. This -is also a change when compared to pre-2.5 behavior, when documents with missing or invalid -coordinate values may have caused errors on insertion when the geo index' `unique` flag was set -and its `ignoreNull` flag was not. This was confusing and has been rectified in 2.5. The method -`ensureGeoConstraint()` now does the same as `ensureGeoIndex()`. Furthermore, the attributes -`constraint`, `unique`, `ignoreNull` and `sparse` flags are now completely ignored when creating -geo indexes. - -The same is true for fulltext indexes. There is no need to specify non-uniqueness or sparsity for -geo or fulltext indexes. - -As sparse indexes may exclude some documents, they cannot be used for every type of query. -Sparse hash indexes cannot be used to find documents for which at least one of the indexed -attributes has a value of `null`. For example, the following AQL query cannot use a sparse -index, even if one was created on attribute `attr`: - - FOR doc In collection - FILTER doc.attr == null - RETURN doc - -If the lookup value is non-constant, a sparse index may or may not be used, depending on -the other types of conditions in the query. If the optimizer can safely determine that -the lookup value cannot be `null`, a sparse index may be used. When uncertain, the optimizer -will not make use of a sparse index in a query in order to produce correct results. 
- -For example, the following queries cannot use a sparse index on `attr` because the optimizer -will not know beforehand whether the comparison values for `doc.attr` will include `null`: - - FOR doc In collection - FILTER doc.attr == SOME_FUNCTION(...) - RETURN doc - - FOR other IN otherCollection - FOR doc In collection - FILTER doc.attr == other.attr - RETURN doc - -Sparse skiplist indexes can be used for sorting if the optimizer can safely detect that the -index range does not include `null` for any of the index attributes. - - -### Selectivity estimates - -Indexes of type `primary`, `edge` and `hash` now provide selectivity estimates. These -will be used by the AQL query optimizer when deciding about index usage. Using selectivity -estimates can lead to faster query execution when more selective indexes are used. - -The selectivity estimates are also returned by the `GET /_api/index` REST API method -in a sub-attribute `selectivityEstimate` for each index that supports it. This -attribute will be omitted for indexes that do not provide selectivity estimates. -If provided, the selectivity estimate will be a numeric value between 0 and 1. - -Selectivity estimates will also be reported in the result of `collection.getIndexes()` -for all indexes that support this. If no selectivity estimate can be determined for -an index, the attribute `selectivityEstimate` will be omitted here, too. - -The web interface also shows selectivity estimates for each index that supports this. - -Currently the following index types can provide selectivity estimates: -- primary index -- edge index -- hash index (unique and non-unique) - -No selectivity estimates will be provided for indexes when running in cluster mode. - - -AQL Optimizer improvements --------------------------- - -### Sort removal - -The AQL optimizer rule "use-index-for-sort" will now remove sorts also in case a non-sorted -index (e.g. a hash index) is used for only equality lookups and all sort attributes are covered -by the equality lookup conditions. - -For example, in the following query the extra sort on `doc.value` will be optimized away -provided there is an index on `doc.value`): - - FOR doc IN collection - FILTER doc.value == 1 - SORT doc.value - RETURN doc - -The AQL optimizer rule "use-index-for-sort" now also removes sort in case the sort criteria -excludes the left-most index attributes, but the left-most index attributes are used -by the index for equality-only lookups. - -For example, in the following query with a skiplist index on `value1`, `value2`, the sort can -be optimized away: - - FOR doc IN collection - FILTER doc.value1 == 1 - SORT doc.value2 - RETURN doc - - -### Constant attribute propagation - -The new AQL optimizer rule `propagate-constant-attributes` will look for attributes that are -equality-compared to a constant value, and will propagate the comparison value into other -equality lookups. This rule will only look inside `FILTER` conditions, and insert constant -values found in `FILTER`s, too. - -For example, the rule will insert `42` instead of `i.value` in the second `FILTER` of the -following query: - - FOR i IN c1 - FOR j IN c2 - FILTER i.value == 42 - FILTER j.value == i.value - RETURN 1 - - -### Interleaved processing - -The optimizer will now inspect AQL data-modification queries and detect if the query's -data-modification part can run in lockstep with the data retrieval part of the query, -or if the data retrieval part must be executed and completed first before the data-modification -can start. 
- -Executing both data retrieval and data-modification in lockstep allows using much smaller -buffers for intermediate results, reducing the memory usage of queries. Not all queries are -eligible for this optimization, and the optimizer will only apply the optimization when it can -safely detect that the data-modification part of the query will not modify data to be found -by the retrieval part. - - -### Query execution statistics - -The `filtered` attribute was added to AQL query execution statistics. The value of this -attribute indicates how many documents were filtered by `FilterNode`s in the AQL query. -Note that `IndexRangeNode`s can also filter documents by selecting only the required ranges -from the index. The `filtered` value will not include the work done by `IndexRangeNode`s, -but only the work performed by `FilterNode`s. - - -Language improvements ---------------------- - -### Dynamic attribute names in AQL object literals - -This change allows using arbitrary expressions to construct attribute names in object -literals specified in AQL queries. To disambiguate expressions and other unquoted -attribute names, dynamic attribute names need to be enclosed in brackets (`[` and `]`). - -Example: - - FOR i IN 1..100 - RETURN { [ CONCAT('value-of-', i) ] : i } - -### AQL functions - -The following AQL functions were added in 2.5: - -* `MD5(value)`: generates an MD5 hash of `value` -* `SHA1(value)`: generates an SHA1 hash of `value` -* `RANDOM_TOKEN(length)`: generates a random string value of the specified length - -Simplify Foxx usage -------------------- - -Thanks to our user feedback we learned that Foxx is a powerful, yet rather complicated concept. -With 2.5 we made it less complicated while keeping all its strength. -That includes a rewrite of the documentation as well as some code changes as follows: - -### Moved Foxx applications to a different folder. - -Until 2.4 Foxx apps were stored in the following folder structure: -`/databases//:`. -This caused some trouble as apps where cached based on name and version and updates did not apply. -Also the path on filesystem and the app's access URL had no relation to one another. -Now the path on filesystem is identical to the URL (except the appended APP): -`/_db///APP` - -### Rewrite of Foxx routing - -The routing of Foxx has been exposed to major internal changes we adjusted because of user feedback. -This allows us to set the development mode per mount point without having to change paths and hold -apps at separate locations. - -### Foxx Development mode - -The development mode used until 2.4 is gone. It has been replaced by a much more mature version. -This includes the deprecation of the javascript.dev-app-path parameter, which is useless since 2.5. -Instead of having two separate app directories for production and development, apps now reside in -one place, which is used for production as well as for development. -Apps can still be put into development mode, changing their behavior compared to production mode. -Development mode apps are still reread from disk at every request, and still they ship more debug -output. - -This change has also made the startup options `--javascript.frontend-development-mode` and -`--javascript.dev-app-path` obsolete. The former option will not have any effect when set, and the -latter option is only read and used during the upgrade to 2.5 and does not have any effects later. 
- -### Foxx install process - -Installing Foxx apps used to be a two-step process: import them into ArangoDB and mount them at a -specific mount point. These operations have been joined together. You can install an app at one -mount point, that's it. No fetch, mount, unmount, purge cycle anymore. The commands have been -simplified to just: - -* install: get your Foxx app up and running -* uninstall: shut it down and erase it from disk - -### Foxx error output - -Until 2.4 the errors produced by Foxx were not optimal. Often, the error message was just -`unable to parse manifest` and contained only an internal stack trace. -In 2.5 we made major improvements there, including a much more fine-grained error output that -helps you debug your Foxx apps. The error message printed is now much closer to its source and -should help you track it down. - -We also added default handlers for unhandled errors in Foxx apps: - -* You will get a nice internal error page whenever your Foxx app is called but was not installed - due to any error -* You will get a proper error message when an uncaught error occurs in any app route - -In production mode the messages above will NOT contain any information about your Foxx internals -and are safe to be exposed to third party users. -In development mode the messages above will contain the stacktrace (if available), making it easier for -your in-house devs to track down errors in the application. - -### Foxx console - -We added a `console` object to Foxx apps. All Foxx apps now have a console object implementing -the familiar Console API in their global scope, which can be used to log diagnostic -messages to the database. This console also allows reading the error output of one specific Foxx app. - -### Foxx requests -We added the `org/arangodb/request` module, which provides a simple API for making HTTP requests -to external services. This enables Foxx to be directly part of a microservice architecture. - diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md deleted file mode 100644 index 068eb45f878f..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md +++ /dev/null @@ -1,347 +0,0 @@ -Features and Improvements in ArangoDB 2.6 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.6. ArangoDB 2.6 also contains several bugfixes that are not listed -here. For a list of bugfixes, please consult the -[CHANGELOG](https://github.com/arangodb/arangodb/blob/devel/CHANGELOG). - -APIs added ---------- - -### Batch document removal and lookup commands - -The following commands have been added for `collection` objects: - -* `collection.lookupByKeys(keys)` -* `collection.removeByKeys(keys)` - -These commands can be used to perform multi-document lookup and removal operations efficiently -from the ArangoShell. The argument to these operations is an array of document keys. - -These commands can also be used via the HTTP REST API. Their endpoints are: - -* `PUT /_api/simple/lookup-by-keys` -* `PUT /_api/simple/remove-by-keys` - -### Collection export HTTP REST API - -ArangoDB now provides a dedicated collection export API, which can take snapshots of entire -collections more efficiently than the general-purpose cursor API. The export API is useful -to transfer the contents of an entire collection to a client application. It provides optional -filtering on specific attributes.
- -The export API is available at endpoint `POST /_api/export?collection=...`. The API has the -same return value structure as the already established cursor API (`POST /_api/cursor`). - -An introduction to the export API is given in this blog post: -http://jsteemann.github.io/blog/2015/04/04/more-efficient-data-exports/ - -AQL improvements ----------------- - -### EDGES AQL Function - -The AQL function EDGES got a new fifth optional parameter, which must be an -object if specified. Right now only one option is available for it: - -* `includeVertices` this is a boolean parameter that allows to modify the result of - `EDGES()`. The default value for `includeVertices` is `false`, which does not have - any effect. Setting it to `true` will modify the result, such that also the connected - vertices are returned along with the edges: - - { vertex: , edge: } - -### Subquery optimizations for AQL queries - -This optimization avoids copying intermediate results into subqueries that are not required -by the subquery. - -A brief description can be found here: -http://jsteemann.github.io/blog/2015/05/04/subquery-optimizations/ - -### Return value optimization for AQL queries - -This optimization avoids copying the final query result inside the query's main `ReturnNode`. - -A brief description can be found here: -http://jsteemann.github.io/blog/2015/05/04/return-value-optimization-for-aql/ - -### Speed up AQL queries containing big `IN` lists for index lookups - -`IN` lists used for index lookups had performance issues in previous versions of ArangoDB. -These issues have been addressed in 2.6 so using bigger `IN` lists for filtering is much -faster. - -A brief description can be found here: -http://jsteemann.github.io/blog/2015/05/07/in-list-improvements/ - -### Added alternative implementation for AQL COLLECT - -The alternative method uses a hash table for grouping and does not require its input elements -to be sorted. It will be taken into account by the optimizer for `COLLECT` statements that do -not use an `INTO` clause. - -In case a `COLLECT` statement can use the hash table variant, the optimizer will create an extra -plan for it at the beginning of the planning phase. In this plan, no extra `SORT` node will be -added in front of the `COLLECT` because the hash table variant of `COLLECT` does not require -sorted input. Instead, a `SORT` node will be added after it to sort its output. This `SORT` node -may be optimized away again in later stages. If the sort order of the result is irrelevant to -the user, adding an extra `SORT null` after a hash `COLLECT` operation will allow the optimizer to -remove the sorts altogether. - -In addition to the hash table variant of `COLLECT`, the optimizer will modify the original plan -to use the regular `COLLECT` implementation. As this implementation requires sorted input, the -optimizer will insert a `SORT` node in front of the `COLLECT`. This `SORT` node may be optimized -away in later stages. - -The created plans will then be shipped through the regular optimization pipeline. In the end, -the optimizer will pick the plan with the lowest estimated total cost as usual. The hash table -variant does not require an up-front sort of the input, and will thus be preferred over the -regular `COLLECT` if the optimizer estimates many input elements for the `COLLECT` node and -cannot use an index to sort them. 
- -The optimizer can be explicitly told to use the regular *sorted* variant of `COLLECT` by -suffixing a `COLLECT` statement with `OPTIONS { "method" : "sorted" }`. This will override the -optimizer guesswork and only produce the *sorted* variant of `COLLECT`. - -A blog post on the new `COLLECT` implementation can be found here: -http://jsteemann.github.io/blog/2015/04/22/collecting-with-a-hash-table/ - -### Simplified return value syntax for data-modification AQL queries - -Since version 2.4, ArangoDB allows returning results from data-modification AQL queries. The -syntax for this was quite limited and verbose: - -``` -FOR i IN 1..10 - INSERT { value: i } IN test - LET inserted = NEW - RETURN inserted -``` -The `LET inserted = NEW RETURN inserted` was required literally to return the inserted -documents. No calculations could be made using the inserted documents. - -This is now more flexible. After a data-modification clause (e.g. `INSERT`, `UPDATE`, `REPLACE`, -`REMOVE`, `UPSERT`) there can follow any number of `LET` calculations. These calculations can -refer to the pseudo-values `OLD` and `NEW` that are created by the data-modification statements. - -This allows returning projections of inserted or updated documents, e.g.: - -``` -FOR i IN 1..10 - INSERT { value: i } IN test - RETURN { _key: NEW._key, value: i } -``` - -Still not every construct is allowed after a data-modification clause. For example, no functions -can be called that may access documents. - -More information can be found here: -http://jsteemann.github.io/blog/2015/03/27/improvements-for-data-modification-queries/ - -### Added AQL `UPSERT` statement - -This adds an `UPSERT` statement to AQL that is a combination of both `INSERT` and `UPDATE` / -`REPLACE`. The `UPSERT` will search for a matching document using a user-provided example. -If no document matches the example, the *insert* part of the `UPSERT` statement will be -executed. If there is a match, the *update* / *replace* part will be carried out: - -``` -UPSERT { page: 'index.html' } /* search example */ -INSERT { page: 'index.html', pageViews: 1 } /* insert part */ -UPDATE { pageViews: OLD.pageViews + 1 } /* update part */ -IN pageViews -``` - -`UPSERT` can be used with an `UPDATE` or `REPLACE` clause. The `UPDATE` clause will perform -a partial update of the found document, whereas the `REPLACE` clause will replace the found -document entirely. The `UPDATE` or `REPLACE` parts can refer to the pseudo-value `OLD`, which -contains all attributes of the found document. - -`UPSERT` statements can optionally return values. In the following query, the return -attribute `found` will return the found document before the `UPDATE` was applied. If no -document was found, `found` will contain a value of `null`. The `updated` result attribute will -contain the inserted / updated document: - -``` -UPSERT { page: 'index.html' } /* search example */ -INSERT { page: 'index.html', pageViews: 1 } /* insert part */ -UPDATE { pageViews: OLD.pageViews + 1 } /* update part */ -IN pageViews -RETURN { found: OLD, updated: NEW } -``` - -A more detailed description of `UPSERT` can be found here: -http://jsteemann.github.io/blog/2015/03/27/preview-of-the-upsert-command/ - -### Miscellaneous AQL changes - -When errors occur inside AQL user functions, the error message will now contain a stacktrace, -indicating the line of code in which the error occurred. This should make debugging AQL user functions -easier.
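For instance, a user function registered from arangosh will now point at the failing line when it throws (a minimal sketch; the namespace `myfunctions` and the function body are hypothetical):

```js
var aqlfunctions = require("org/arangodb/aql/functions");

// register a user function that throws for non-string input
aqlfunctions.register("myfunctions::shorten", function (value) {
  return value.substring(0, 10); // throws a TypeError for e.g. null
});

// a query such as RETURN myfunctions::shorten(null) will now report
// a stacktrace that points at the line above
```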
- -Web Admin Interface -------------------- - -ArangoDB's built-in web interface now uses sessions. Session information is stored in cookies, so clients -using the web interface must accept cookies in order to use it. - -The new startup option `--server.session-timeout` can be used for adjusting the session lifetime. - -The AQL editor in the web interface now provides an *explain* functionality, which can be used for inspecting and performance-tuning AQL queries. -The query execution time is now also displayed in the AQL editor. - -Foxx apps that require configuration or are missing dependencies are now indicated in the app overview and details. - -Foxx improvements ------------------ - -### Configuration and Dependencies - -Foxx app manifests can now define configuration options, as well as dependencies on other Foxx apps. - -An introduction to Foxx configurations can be found in the blog: -https://www.arangodb.com/2015/05/reusable-foxx-apps-with-configurations/ - -And the blog post on Foxx dependencies can be found here: -https://www.arangodb.com/2015/05/foxx-dependencies-for-more-composable-foxx-apps/ - -### Mocha Tests - -You can now write tests for your Foxx apps using the Mocha testing framework: -https://www.arangodb.com/2015/04/testing-foxx-mocha/ - -A recipe for writing tests for your Foxx apps can be found in the cookbook: -https://docs.arangodb.com/2.8/Cookbook/FoxxTesting.html - -### API Documentation - -The API documentation has been updated to Swagger 2. You can now also mount API -documentation in your own Foxx apps. - -Also see the blog post introducing this feature: -https://www.arangodb.com/2015/05/document-your-foxx-apps-with-swagger-2/ - -### Custom Scripts and Foxx Queue - -In addition to the existing *setup* and *teardown* scripts you can now define custom scripts -in your Foxx manifest and invoke these using the web admin interface or the Foxx manager CLI. -These scripts can now also take positional arguments and export return values. - -Job types for the Foxx Queue can now be defined as a script name and app mount path allowing -the use of Foxx scripts as job types. The pre-2.6 job types are known to cause issues when -restarting the server and are error-prone; we strongly recommended converting any existing -job types to the new format. - -Client tools ------------- - -The default configuration value for the option `--server.request-timeout` was increased from -300 to 1200 seconds for all client tools (arangosh, arangoimp, arangodump, arangorestore). - -The default configuration value for the option `--server.connect-timeout` was increased from -3 to 5 seconds for client tools (arangosh, arangoimp, arangodump, arangorestore). - -### Arangorestore - -The option `--create-database` was added for arangorestore. - -Setting this option to `true` will now create the target database if it does not exist. When creating -the target database, the username and passwords passed to arangorestore will be used to create an -initial user for the new database. - -The default value for this option is `false`. - -### Arangoimp - -Arangoimp can now optionally update or replace existing documents, provided the import data contains -documents with `_key` attributes. - -Previously, the import could be used for inserting new documents only, and re-inserting a document with -an existing key would have failed with a *unique key constraint violated* error. - -The behavior of arangoimp (insert, update, replace on duplicate key) can now be controlled with the -option `--on-duplicate`. 
The option can have one of the following values: - -* `error`: when a unique key constraint error occurs, do not import or update the document but - report an error. This is the default. - -* `update`: when a unique key constraint error occurs, try to (partially) update the existing - document with the data specified in the import. This may still fail if the document would - violate secondary unique indexes. Only the attributes present in the import data will be - updated and other attributes already present will be preserved. The number of updated documents - will be reported in the `updated` attribute of the HTTP API result. - -* `replace`: when a unique key constraint error occurs, try to fully replace the existing - document with the data specified in the import. This may still fail if the document would - violate secondary unique indexes. The number of replaced documents will be reported in the - `updated` attribute of the HTTP API result. - -* `ignore`: when a unique key constraint error occurs, ignore this error. There will be no - insert, update or replace for the particular document. Ignored documents will be reported - separately in the `ignored` attribute of the HTTP API result. - -The default value is `error`. - -A few examples for using arangoimp with the `--on-duplicate` option can be found here: -http://jsteemann.github.io/blog/2015/04/14/updating-documents-with-arangoimp/ - -Miscellaneous changes ---------------------- - -* Some Linux-based ArangoDB packages are now using tcmalloc for memory allocator. - -* Upgraded ICU library to version 54. This increases performance in many places. - -* Allow to split an edge index into buckets which are resized individually. The default value is `1`, - resembling the pre-2.6 behavior. Using multiple buckets will lead to the index entries being - distributed to the individual buckets, with each bucket being responsible only for a fraction of - the total index entries. Using multiple buckets may lead to more frequent but much faster index - bucket resizes, and is recommended for bigger edge collections. - - -* Default configuration value for option `--server.backlog-size` was changed from 10 to 64. - -* Default configuration value for option `--database.ignore-datafile-errors` was changed from `true` to `false` - -* Document keys can now contain `@` and `.` characters - -* Fulltext index can now index text values contained in direct sub-objects of the indexed attribute. - - Previous versions of ArangoDB only indexed the attribute value if it was a string. Sub-attributes - of the index attribute were ignored when fulltext indexing. - - Now, if the index attribute value is an object, the object's values will each be included in the - fulltext index if they are strings. If the index attribute value is an array, the array's values - will each be included in the fulltext index if they are strings. 
- - For example, with a fulltext index present on the `translations` attribute, the following text - values will now be indexed: - - var c = db._create("example"); - c.ensureFulltextIndex("translations"); - c.insert({ translations: { en: "fox", de: "Fuchs", fr: "renard", ru: "лиса" } }); - c.insert({ translations: "Fox is the English translation of the German word Fuchs" }); - c.insert({ translations: [ "ArangoDB", "document", "database", "Foxx" ] }); - - c.fulltext("translations", "лиса").toArray(); // returns only first document - c.fulltext("translations", "Fox").toArray(); // returns first and second documents - c.fulltext("translations", "prefix:Fox").toArray(); // returns all three documents - -* Added configuration option `--server.foxx-queues-poll-interval` - - This startup option controls the frequency with which the Foxx queues manager is checking - the queue (or queues) for jobs to be executed. - - The default value is `1` second. Lowering this value will result in the queue manager waking - up and checking the queues more frequently, which may increase CPU usage of the server. - When not using Foxx queues, this value can be raised to save some CPU time. - -* Added configuration option `--server.foxx-queues` - - This startup option controls whether the Foxx queue manager will check queue and job entries - in the `_system` database only. Restricting the Foxx queue manager to the `_system` database - will lead to the queue manager having to check only the queues collection of a single database, - whereas making it check the queues of all databases might result in more work to be done and - more CPU time to be used by the queue manager. diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures27.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures27.md deleted file mode 100644 index b9249193ba20..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures27.md +++ /dev/null @@ -1,605 +0,0 @@ -Features and Improvements in ArangoDB 2.7 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.7. ArangoDB 2.7 also contains several bugfixes that are not listed -here. For a list of bugfixes, please consult the -[CHANGELOG](https://github.com/arangodb/arangodb/blob/devel/CHANGELOG). - -Performance improvements ------------------------- - -### Index buckets - -The primary indexes and hash indexes of collections can now be split into multiple -index buckets. This option was available for edge indexes only in ArangoDB 2.6. - -A bucket can be considered a container for a specific range of index values. For -primary, hash and edge indexes, determining the responsible bucket for an index -value is done by hashing the actual index value and applying a simple arithmetic -operation on the hash. - -Because an index value will be present in at most one bucket and buckets are -independent, using multiple buckets provides the following benefits: - -* initially building the in-memory index data can be parallelized even for a - single index, with one thread per bucket (or with threads being responsible - for more than one bucket at a time). This can help reducing the loading time - for collections. - -* resizing an index when it is about to run out of reserve space is performed - per bucket. As each bucket only contains a fraction of the entire index, - resizing and rehashing a bucket is much faster and less intrusive than resizing - and rehashing the entire index. 
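For instance, a collection that is expected to grow large could be created with a higher bucket count right away (a minimal sketch; the collection name `example` is hypothetical):

```js
// create a collection with 16 index buckets (the value must be a power of 2)
db._create("example", { indexBuckets: 16 });
```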
- -When creating new collections, the default number of index buckets is `8` since -ArangoDB 2.7. In previous versions, the default value was `1`. The number of -buckets can also be adjusted for existing collections so they can benefit from -the optimizations. The number of index buckets can be set for a collection at -any time by using a collection's `properties` function: - -```js - db.collection.properties({ indexBuckets: 16 }); -``` - -The number of index buckets must be a power of 2. - -Please note that for building the index data for multiple buckets in parallel -it is required that a collection contains a significant amount of documents because -for a low number of documents the overhead of parallelization will outweigh its -benefits. The current threshold value is 256k documents, but this value may change -in future versions of ArangoDB. Additionally, the configuration option -`--database.index-threads` will determine how many parallel threads may be used -for building the index data. - - -### Faster update and remove operations in non-unique hash indexes - -The unique hash indexes in ArangoDB provided an amortized O(1) lookup, insert, update -and remove performance. Non-unique hash indexes provided amortized O(1) insert -performance, but had worse performance for update and remove operations for -non-unique values. For documents with the same index value, they maintained a -list of collisions. When a document was updated or removed, that exact document -had to be found in the collisions list for the index value. While getting to the -start of the collisions list was O(1), scanning the list had O(n) performance in -the worst case (with n being the number of documents with the same index value). -Overall, this made update and remove operations in non-unique hash indexes -slow if the index contained many duplicate values. - -This has been changed in ArangoDB 2.7 so that non-unique hash indexes now also -provide update and remove operations with an amortized complexity of O(1), even -if there are many duplicates. - -Resizing non-unique hash indexes now also doesn't require looking into the -document data (which may involve a disk access) because the index maintains some -internal cache value per document. When resizing and rehashing the index (or -an index bucket), the index will first compare only the cache values before -peeking into the actual documents. This change can also lead to reduced index -resizing times. - - -### Throughput enhancements - -The ArangoDB-internal implementations for dispatching requests, keeping statistics -and assigning V8 contexts to threads have been improved in order to use less -locks. These changes allow higher concurrency and throughput in these components, -which can also make the server handle more requests in a given period of time. - -What gains can be expected depends on which operations are executed, but there -are real-world cases in which [throughput increased by between 25 % and 70 % when -compared to 2.6](https://www.arangodb.com/2015/08/throughput-enhancements/). - - -### Madvise hints - -The Linux variant for ArangoDB provides the OS with madvise hints about index -memory and datafile memory. These hints can speed up things when memory is tight, -in particular at collection load time but also for random accesses later. There -is no formal guarantee that the OS actually uses the madvise hints provided by -ArangoDB, but actual measurements have shown improvements for loading bigger -collections. 
- - -AQL improvements ----------------- - -### Additional date functions - -ArangoDB 2.7 provides several extra AQL functions for date and time -calculation and manipulation. These functions were contributed -by GitHub users @CoDEmanX and @friday. A big thanks for their work! - -The following extra date functions are available from 2.7 on: - -* `DATE_DAYOFYEAR(date)`: Returns the day of year number of *date*. - The return values range from 1 to 365, or 366 in a leap year respectively. - -* `DATE_ISOWEEK(date)`: Returns the ISO week date of *date*. - The return values range from 1 to 53. Monday is considered the first day of the week. - There are no fractional weeks, thus the last days in December may belong to the first - week of the next year, and the first days in January may be part of the previous year's - last week. - -* `DATE_LEAPYEAR(date)`: Returns whether the year of *date* is a leap year. - -* `DATE_QUARTER(date)`: Returns the quarter of the given date (1-based): - * 1: January, February, March - * 2: April, May, June - * 3: July, August, September - * 4: October, November, December - -- *DATE_DAYS_IN_MONTH(date)*: Returns the number of days in *date*'s month (28..31). - -* `DATE_ADD(date, amount, unit)`: Adds *amount* given in *unit* to *date* and - returns the calculated date. - - *unit* can be either of the following to specify the time unit to add or - subtract (case-insensitive): - - y, year, years - - m, month, months - - w, week, weeks - - d, day, days - - h, hour, hours - - i, minute, minutes - - s, second, seconds - - f, millisecond, milliseconds - - *amount* is the number of *unit*s to add (positive value) or subtract - (negative value). - -* `DATE_SUBTRACT(date, amount, unit)`: Subtracts *amount* given in *unit* from - *date* and returns the calculated date. - - It works the same as `DATE_ADD()`, except that it subtracts. It is equivalent - to calling `DATE_ADD()` with a negative amount, except that `DATE_SUBTRACT()` - can also subtract ISO durations. Note that negative ISO durations are not - supported (i.e. starting with `-P`, like `-P1Y`). - -* `DATE_DIFF(date1, date2, unit, asFloat)`: Calculate the difference - between two dates in given time *unit*, optionally with decimal places. - Returns a negative value if *date1* is greater than *date2*. - -* `DATE_COMPARE(date1, date2, unitRangeStart, unitRangeEnd)`: Compare two - partial dates and return true if they match, false otherwise. The parts to - compare are defined by a range of time units. - - The full range is: years, months, days, hours, minutes, seconds, milliseconds. - Pass the unit to start from as *unitRangeStart*, and the unit to end with as - *unitRangeEnd*. All units in between will be compared. Leave out *unitRangeEnd* - to only compare *unitRangeStart*. - -* `DATE_FORMAT(date, format)`: Format a date according to the given format string. - It supports the following placeholders (case-insensitive): - - %t: timestamp, in milliseconds since midnight 1970-01-01 - - %z: ISO date (0000-00-00T00:00:00.000Z) - - %w: day of week (0..6) - - %y: year (0..9999) - - %yy: year (00..99), abbreviated (last two digits) - - %yyyy: year (0000..9999), padded to length of 4 - - %yyyyyy: year (-009999 .. 
+009999), with sign prefix and padded to length of 6 - - %m: month (1..12) - - %mm: month (01..12), padded to length of 2 - - %d: day (1..31) - - %dd: day (01..31), padded to length of 2 - - %h: hour (0..23) - - %hh: hour (00..23), padded to length of 2 - - %i: minute (0..59) - - %ii: minute (00..59), padded to length of 2 - - %s: second (0..59) - - %ss: second (00..59), padded to length of 2 - - %f: millisecond (0..999) - - %fff: millisecond (000..999), padded to length of 3 - - %x: day of year (1..366) - - %xxx: day of year (001..366), padded to length of 3 - - %k: ISO week date (1..53) - - %kk: ISO week date (01..53), padded to length of 2 - - %l: leap year (0 or 1) - - %q: quarter (1..4) - - %a: days in month (28..31) - - %mmm: abbreviated English name of month (Jan..Dec) - - %mmmm: English name of month (January..December) - - %www: abbreviated English name of weekday (Sun..Sat) - - %wwww: English name of weekday (Sunday..Saturday) - - %&: special escape sequence for rare occasions - - %%: literal % - - %: ignored - - -### RETURN DISTINCT - -To return unique values from a query, AQL now provides the `DISTINCT` keyword. -It can be used as a modifier for `RETURN` statements, as a shorter alternative to -the already existing `COLLECT` statement. - -For example, the following query only returns distinct (unique) `status` -attribute values from the collection: - -``` - FOR doc IN collection - RETURN DISTINCT doc.status -``` - -`RETURN DISTINCT` is not allowed on the top-level of a query if there is no `FOR` -loop in front of it. `RETURN DISTINCT` is allowed in subqueries. - -`RETURN DISTINCT` ensures that the values returned are distinct (unique), but does -not guarantee any order of results. In order to have certain result order, an -additional `SORT` statement must be added to a query. - - -### Shorthand object notation - -AQL now provides a shorthand notation for object literals in the style of ES6 -object literals: - -``` - LET name = "Peter" - LET age = 42 - RETURN { name, age } -``` - -This is equivalent to the previously available canonical form, which is still -available and supported: - -``` - LET name = "Peter" - LET age = 42 - RETURN { name : name, age : age } -``` - - -### Array expansion improvements - -The already existing [\*] operator has been improved with optional -filtering and projection and limit capabilities. - -For example, consider the following example query that filters values from -an array attribute: -``` - FOR u IN users - RETURN { - name: u.name, - friends: ( - FOR f IN u.friends - FILTER f.age > u.age - RETURN f.name - ) - } -``` - -With the [\*] operator, this query can be simplified to - -``` - FOR u IN users - RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name } -``` - -The pseudo-variable *CURRENT* can be used to access the current array element. -The `FILTER` condition can refer to `CURRENT` or any variables valid in the -outer scope. - -To return a projection of the current element, there can now be an inline `RETURN`: - -``` - FOR u IN users - RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)] -``` - -which is the simplified variant for: - -``` - FOR u IN users - RETURN ( - FOR friend IN u.friends - RETURN CONCAT(friend.name, " is a friend of ", u.name) - ) -``` - - -### Array contraction - -In order to collapse (or flatten) results in nested arrays, AQL now provides the [\*\*] -operator. It works similar to the [\*] operator, but additionally collapses nested -arrays. 
How many levels are collapsed is determined by the amount of \* characters used. - -For example, consider the following query that produces a nested result: - -``` - FOR u IN users - RETURN u.friends[*].name -``` - -The [\*\*] operator can now be applied to get rid of the nested array and -turn it into a flat array. We simply apply the [\*\*] on the previous query -result: - -``` - RETURN ( - FOR u IN users RETURN u.friends[*].name - )[**] -``` - - -### Template query strings - -Assembling query strings in JavaScript has been error-prone when using simple -string concatenation, especially because plain JavaScript strings do not have -multiline-support, and because of potential parameter injection issues. While -multiline query strings can be assembled with ES6 template strings since ArangoDB 2.5, -and query bind parameters are there since ArangoDB 1.0 to prevent parameter -injection, there was no JavaScript-y solution to combine these. - -ArangoDB 2.7 now provides an ES6 template string generator function that can -be used to easily and safely assemble AQL queries from JavaScript. JavaScript -variables and expressions can be used easily using regular ES6 template string -substitutions: - -```js - let name = 'test'; - let attributeName = '_key'; - - let query = aqlQuery`FOR u IN users - FILTER u.name == ${name} - RETURN u.${attributeName}`; - db._query(query); -``` - -This is more legible than when using a plain JavaScript string and also does -not require defining the bind parameter values separately: - -```js - let name = 'test'; - let attributeName = '_key'; - - let query = "FOR u IN users " + - "FILTER u.name == @name " + - "RETURN u.@attributeName"; - db._query(query, { - name, - attributeName - }); -``` - -The `aqlQuery` template string generator will also handle collection objects -automatically: - -```js - db._query(aqlQuery`FOR u IN ${ db.users } RETURN u.name`); -``` - -Note that while template strings are available in the JavaScript functions provided -to build queries, they aren't a feature of AQL itself. AQL could always handle -multiline query strings and provided bind parameters (`@...`) for separating -the query string and the parameter values. The `aqlQuery` template string -generator function will take care of this separation, too, but will do it -*behind the scenes*. - - -### AQL query result cache - -The AQL query result cache can optionally cache the complete results of all or -just selected AQL queries. It can be operated in the following modes: - -* `off`: the cache is disabled. No query results will be stored -* `on`: the cache will store the results of all AQL queries unless their `cache` - attribute flag is set to `false` -* `demand`: the cache will store the results of AQL queries that have their - `cache` attribute set to `true`, but will ignore all others - -The mode can be set at server startup using the `--database.query-cache-mode` -configuration option and later changed at runtime. The default value is `off`, -meaning that the query result cache is disabled. This is because the cache may -consume additional memory to keep query results, and also because it must be -invalidated when changes happen in collections for which results have been -cached. 
- -The query result cache may therefore have positive or negative effects on query -execution times, depending on the workload: it will not make much sense turning -on the cache in write-only or write-mostly scenarios, but the cache may be -very beneficial in case workloads are read-only or read-mostly, and query are -complex. - -If the query cache is operated in `demand` mode, it can be controlled per query -if the cache should be checked for a result. - - -### Miscellaneous AQL changes - -### Optimizer - -The AQL optimizer rule `patch-update-statements` has been added. This rule can -optimize certain AQL UPDATE queries that update documents in a collection -that they also iterate over. - -For example, the following query reads documents from a collection in order -to update them: - -``` - FOR doc IN collection - UPDATE doc WITH { newValue: doc.oldValue + 1 } IN collection -``` - -In this case, only a single collection is affected by the query, and there is -no index lookup involved to find the to-be-updated documents. In this case, the -UPDATE query does not require taking a full, memory-intensive snapshot of the -collection, but it can be performed in small chunks. This can lead to memory -savings when executing such queries. - -### Function call arguments optimization - -This optimization will lead to arguments in function calls inside AQL queries -not being copied but being passed by reference. This may speed up calls to -functions with bigger argument values or queries that call AQL functions a lot -of times. - - -Web Admin Interface -------------------- - -The web interface now has a new design. - -The "Applications" tab in the web interfaces has been renamed to "Services". - -The ArangoDB API documentation has been moved from the "Tools" menu to the "Links" menu. -The new documentation is based on Swagger 2.0 and opens in a separate web page. - - -Foxx improvements ------------------ - -### ES2015 Classes - -All Foxx constructors have been replaced with ES2015 classes and can be extended using the class syntax. -The `extend` method is still supported at the moment but will become deprecated in ArangoDB 2.8 and removed in ArangoDB 2.9. - -**Before:** - -```js -var Foxx = require('org/arangodb/foxx'); -var MyModel = Foxx.Model.extend({ - // ... - schema: {/* ... */} -}); -``` - -**After:** - -```js -var Foxx = require('org/arangodb/foxx'); -class MyModel extends Foxx.Model { - // ... -} -MyModel.prototype.schema = {/* ... */}; -``` - -### Confidential configuration - -It is now possible to specify configuration options with the type `password`. The password type is equivalent to the text type but will be masked in the web frontend to prevent accidental exposure of confidential options like API keys and passwords when configuring your Foxx application. - -### Dependencies - -The syntax for specifying dependencies in manifests has been extended to allow specifying optional dependencies. Unmet optional dependencies will not prevent an app from being mounted. The traditional shorthand syntax for specifying non-optional dependencies will still be supported in the upcoming versions of ArangoDB. - -**Before:** - -```json -{ - ... 
- "dependencies": { - "notReallyNeeded": "users:^1.0.0", - "totallyNecessary": "sessions:^1.0.0" - } -} -``` - -**After:** - -```json -{ - "dependencies": { - "notReallyNeeded": { - "name": "users", - "version": "^1.0.0", - "required": false - }, - "totallyNecessary": { - "name": "sessions", - "version": "^1.0.0" - } - } -} -``` - - -Replication ------------ - -The existing replication HTTP API has been extended with methods that replication -clients can use to determine whether a given date, identified by a tick value, is -still present on a master for replication. By calling these APIs, clients can -make an informed decision about whether the master can still provide all missing -data starting from the point up to which the client had already synchronized. -This can be helpful in case a replication client is re-started after a pause. - -Master servers now also track up the point up to which they have sent changes to -clients for replication. This information can be used to determine the point of data -that replication clients have received from the master, and if and how far approximately -they lag behind. - -Finally, restarting the replication applier on a slave server has been made more -robust in case the applier was stopped while there were pending transactions on -the master server, and re-starting the replication applier needs to restore the -state of these transactions. - - -Client tools ------------- - -The filenames in dumps created by arangodump now contain not only the name of the -dumped collection, but also an additional 32-digit hash value. This is done to -prevent overwriting dump files in case-insensitive file systems when there exist -multiple collections with the same name (but with different cases). - -For example, if a database had two collections *test* and *Test*, previous -versions of arangodump created the following files: - -* `test.structure.json` and `test.data.json` for collection *test* -* `Test.structure.json` and `Test.data.json` for collection *Test* - -This did not work in case-insensitive filesystems, because the files for the -second collection would have overwritten the files of the first. arangodump in -2.7 will create the unique files in this case, by appending the 32-digit hash -value to the collection name in all case. These filenames will be unambiguous -even in case-insensitive filesystems. - - -Miscellaneous changes ---------------------- - -### Better control-C support in arangosh - -When CTRL-C is pressed in arangosh, it will now abort the locally running command -(if any). If no command was running, pressing CTRL-C will print a `^C` first. -Pressing CTRL-C again will then quit arangosh. - -CTRL-C can also be used to reset the current prompt while entering complex nested -objects which span multiple input lines. - -CTRL-C support has been added to the ArangoShell versions built with Readline-support -(Linux and macOS only). The Windows version of ArangoDB uses a different library for -handling input, and support for CTRL-C has not been added there yet. - - -### Start / stop - -Linux startup scripts and systemd configuration for arangod now try to adjust the -NOFILE (number of open files) limits for the process. The limit value is set to -131072 (128k) when ArangoDB is started via start/stop commands. - -This will prevent arangod running out of available file descriptors in case of -many parallel HTTP connections or large collections with many datafiles. 
- -Additionally, when ArangoDB is started/stopped manually via the start/stop commands, -the main process will wait for up to 10 seconds after it forks the supervisor -and arangod child processes. If the startup fails within that period, the -start/stop script will fail with a non-zero exit code, allowing any invoking -scripts to handle this error. Previous versions always returned an exit code of -0, even when arangod couldn't be started. - -If the startup of the supervisor or arangod is still ongoing after 10 seconds, -the main program will still return with exit code 0 in order to not block any -scripts. The limit of 10 seconds is arbitrary because the time required for an -arangod startup is not known in advance. - - -### Non-sparse logfiles - -WAL logfiles and datafiles created by arangod are now non-sparse. This prevents -SIGBUS signals being raised when a memory-mapped region backed by a sparse datafile -was accessed and the memory region was not actually backed by disk, for example -because the disk ran out of space. - -arangod now always fully allocates the disk space required for a logfile or datafile -when it creates one, so the memory region can always be backed by disk, and memory -can be accessed without SIGBUS being raised. - diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures28.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures28.md deleted file mode 100644 index e5566bf2baf8..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures28.md +++ /dev/null @@ -1,370 +0,0 @@ -Features and Improvements in ArangoDB 2.8 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 2.8. ArangoDB 2.8 also contains several bugfixes that are not listed -here. For a list of bugfixes, please consult the -[CHANGELOG](https://github.com/arangodb/arangodb/blob/devel/CHANGELOG). - -AQL improvements ----------------- - -### AQL Graph Traversals / Pattern Matching - -AQL offers a new feature to traverse over a graph without writing JavaScript functions -but with all the other features you know from AQL. For this purpose, a special version of -`FOR variableName IN expression` has been introduced. - -This special version has the following format: `FOR vertex-variable, edge-variable, path-variable IN traversal-expression`, -where `traversal-expression` has the following format: -`[depth] direction start-vertex graph-definition` -with the following input parameters: - -* depth (optional): defines how many steps are executed. - The value can either be an integer value (e.g. `3`) or a range of integer values (e.g. `1..5`). The default is 1. -* direction: defines which edge directions are followed. Can be either `OUTBOUND`, `INBOUND` or `ANY`. -* start-vertex: defines where the traversal is started. Must be an `_id` value or a document. -* graph-definition: defines which edge collections are used for the traversal. - Must be either `GRAPH graph-name` for graphs created with the graph-module, or a list of edge collections `edge-col1, edge-col2, .. edge-colN`. - -The three output variables have the following semantics: - -* vertex-variable: The last visited vertex. -* edge-variable: The last visited edge (optional). -* path-variable: The complete path from start-vertex to vertex-variable (optional). - -The traversal statement can be used in the same way as the original `FOR variableName IN expression`, -and can be combined with filters and other AQL constructs. 
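Because the graph definition can also be a plain list of edge collections, a traversal does not require a named graph. A minimal sketch (using a hypothetical `persons` vertex collection and a `knows` edge collection) run from arangosh could look like this:

```js
// anonymous traversal over an explicit edge collection instead of a named graph
db._query(`
  FOR v, e, p IN 1..2 OUTBOUND "persons/alice" knows
    RETURN v.name
`).toArray();
```

The named-graph form (`GRAPH "graph-name"`) is used in the friend-of-a-friend example below.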
- -As an example one can now find the friends of a friend for a certain user with this AQL statement: - -``` -FOR foaf, e, path IN 2 ANY @startUser GRAPH "relations" - FILTER path.edges[0].type == "friend" - FILTER path.edges[1].type == "friend" - FILTER foaf._id != @startUser - RETURN DISTINCT foaf -``` - -Optimizer rules have been implemented to gain performance of the traversal statement. -These rules move filter statements into the traversal statement s.t. paths which can never -pass the filter are not emitted to the variables. - -As an example take the query above and assume there are edges that do not have `type == "friend"`. -If in the first edge step there is such a non-friend edge the second steps will never -be computed for these edges as they cannot fulfill the filter condition. - -### Array Indexes - -Hash indexes and skiplist indexes can now optionally be defined for array values -so that they index individual array members instead of the entire array value. - -To define an index for array values, the attribute name is extended with the -expansion operator `[*]` in the index definition. - -Example: - -``` -db._create("posts"); -db.posts.ensureHashIndex("tags[*]"); -``` - -When given the following document - -```json -{ - "tags": [ - "AQL", - "ArangoDB", - "Index" - ] -} -``` - -this index will now contain the individual values `"AQL"`, `"ArangoDB"` and `"Index"`. - -Now the index can be used for finding all documents having `"ArangoDB"` somewhere in -their `tags` array using the following AQL query: - -``` -FOR doc IN posts - FILTER "ArangoDB" IN doc.tags[*] - RETURN doc -``` - -It is also possible to create an index on sub-attributes of array values. This makes -sense when the index attribute is an array of objects, e.g. - -```js -db._drop("posts"); -db._create("posts"); -db.posts.ensureIndex({ type: "hash", fields: [ "tags[*].name" ] }); -db.posts.insert({ tags: [ { name: "AQL" }, { name: "ArangoDB" }, { name: "Index" } ] }); -db.posts.insert({ tags: [ { name: "AQL" }, { name: "2.8" } ] }); -``` - -The following query will then use the array index: - -``` -FOR doc IN posts - FILTER 'AQL' IN doc.tags[*].name - RETURN doc -``` - -Array values will automatically be de-duplicated before being inserted into an array index. - -Please note that filtering using array indexes only works from within AQL queries and -only if the query filters on the indexed attribute using the `IN` operator. The other -comparison operators (`==`, `!=`, `>`, `>=`, `<`, `<=`) currently do not use array -indexes. - - -### Optimizer improvements - -The AQL query optimizer can now use indexes if multiple filter conditions on attributes of -the same collection are combined with logical ORs, and if the usage of indexes would completely -cover these conditions. - -For example, the following queries can now use two independent indexes on `value1` and `value2` -(the latter query requires that the indexes are skiplist indexes due to usage of the `<` and `>` -comparison operators): - -``` -FOR doc IN collection FILTER doc.value1 == 42 || doc.value2 == 23 RETURN doc -FOR doc IN collection FILTER doc.value1 < 42 || doc.value2 > 23 RETURN doc -``` - -The new optimizer rule "sort-in-values" can now pre-sort the right-hand side operand -of `IN` and `NOT IN` operators so the operation can use a binary search with logarithmic -complexity instead of a linear search. 
The rule will be applied when the right-hand side -operand of an `IN` or `NOT IN` operator in a filter condition is a variable that is defined -in a different loop/scope than the operator itself. Additionally, the filter condition must -consist of solely the `IN` or `NOT IN` operation in order to avoid any side-effects. - -The rule will kick in for a queries such as the following: - -``` -LET values = /* some runtime expression here */ -FOR doc IN collection - FILTER doc.value IN values - RETURN doc -``` - -It will not be applied for the followig queries, because the right-hand side operand of the -`IN` is either not a variable, or because the FILTER condition may have side effects: - -``` -FOR doc IN collection - FILTER doc.value IN /* some runtime expression here */ - RETURN doc -``` - -``` -LET values = /* some runtime expression here */ -FOR doc IN collection - FILTER FUNCTION(doc.values) == 23 && doc.value IN values - RETURN doc -``` - - -### AQL functions added - -The following AQL functions have been added in 2.8: - -* `POW(base, exponent)`: returns the *base* to the exponent *exp* - -* `UNSET_RECURSIVE(document, attributename, ...)`: recursively removes the attributes - *attributename* (can be one or many) from *document* and its sub-documents. All other - attributes will be preserved. - Multiple attribute names can be specified by either passing multiple individual string argument - names, or by passing an array of attribute names: - - UNSET_RECURSIVE(doc, '_id', '_key', 'foo', 'bar') - UNSET_RECURSIVE(doc, [ '_id', '_key', 'foo', 'bar' ]) - -* `IS_DATESTRING(value)`: returns true if *value* is a string that can be used in a date function. - This includes partial dates such as *2015* or *2015-10* and strings containing - invalid dates such as *2015-02-31*. The function will return false for all - non-string values, even if some of them may be usable in date functions. - - -### Miscellaneous improvements - -* the ArangoShell now provides the convenience function `db._explain(query)` for retrieving - a human-readable explanation of AQL queries. This function is a shorthand for - `require("org/arangodb/aql/explainer").explain(query)`. - -* the AQL query optimizer now automatically converts `LENGTH(collection-name)` to an optimized - expression that returns the number of documents in a collection. Previous versions of - ArangoDB returned a warning when using this expression and also enumerated all documents - in the collection, which was inefficient. - -* improved performance of skipping over many documents in an AQL query when no - indexes and no filters are used, e.g. - - FOR doc IN collection - LIMIT 1000000, 10 - RETURN doc - -* added cluster execution site info in execution plan explain output for AQL queries - -* for 30+ AQL functions there is now an additional implementation in C++ that removes - the need for internal data conversion when the function is called - -* the AQL editor in the web interface now supports using bind parameters - - -Deadlock detection ------------------- - -ArangoDB 2.8 now has an automatic deadlock detection for transactions. - -A deadlock is a situation in which two or more concurrent operations (user transactions -or AQL queries) try to access the same resources (collections, documents) and need to -wait for the others to finish, but none of them can make any progress. - -In case of such a deadlock, there would be no progress for any of the involved -transactions, and none of the involved transactions could ever complete. 
This is -completely undesirable, so the new automatic deadlock detection mechanism in ArangoDB -will automatically kick in and abort one of the transactions involved in such a deadlock. -Aborting means that all changes done by the transaction will be rolled back and error -29 (`deadlock detected`) will be thrown. - -Client code (AQL queries, user transactions) that accesses more than one collection -should be aware of the potential of deadlocks and should handle the error 29 -(`deadlock detected`) properly, either by passing the exception to the caller or -retrying the operation. - - -Replication ------------ - -The following improvements for replication have been made in 2.8 (note: most of them -have been backported to ArangoDB 2.7 as well): - -* added `autoResync` configuration parameter for continuous replication. - - When set to `true`, a replication slave will automatically trigger a full data - re-synchronization with the master when the master cannot provide the log data - the slave had asked for. Note that `autoResync` will only work when the option - `requireFromPresent` is also set to `true` for the continuous replication, or - when the continuous syncer is started and detects that no start tick is present. - - Automatic re-synchronization may transfer a lot of data from the master to the - slave and may be expensive. It is therefore turned off by default. - When turned off, the slave will never perform an automatic re-synchronization - with the master. - -* added `idleMinWaitTime` and `idleMaxWaitTime` configuration parameters for - continuous replication. - - These parameters can be used to control the minimum and maximum wait time the - slave will (intentionally) idle and not poll for master log changes in case the - master had sent the full logs already. - The `idleMaxWaitTime` value will only be used when `adapativePolling` is set - to `true`. When `adaptivePolling` is disabled, only `idleMinWaitTime` will be - used as a constant time span in which the slave will not poll the master for - further changes. The default values are 0.5 seconds for `idleMinWaitTime` and - 2.5 seconds for `idleMaxWaitTime`, which correspond to the hard-coded values - used in previous versions of ArangoDB. - -* added `initialSyncMaxWaitTime` configuration parameter for initial and continuous - replication - - This option controls the maximum wait time (in seconds) that the initial - synchronization will wait for a response from the master when fetching initial - collection data. If no response is received within this time period, the initial - synchronization will give up and fail. This option is also relevant for - continuous replication in case *autoResync* is set to *true*, as then the - continuous replication may trigger a full data re-synchronization in case - the master cannot the log data the slave had asked for. - -* HTTP requests sent from the slave to the master during initial synchronization - will now be retried if they fail with connection problems. - -* the initial synchronization now logs its progress so it can be queried using - the regular replication status check APIs. - -* added `async` attribute for `sync` and `syncCollection` operations called from - the ArangoShell. Setthing this attribute to `true` will make the synchronization - job on the server go into the background, so that the shell does not block. 
The - status of the started asynchronous synchronization job can be queried from the - ArangoShell like this: - - /* starts initial synchronization */ - var replication = require("org/arangodb/replication"); - var id = replication.sync({ - endpoint: "tcp://master.domain.org:8529", - username: "myuser", - password: "mypasswd", - async: true - }); - - /* now query the id of the returned async job and print the status */ - print(replication.getSyncResult(id)); - - The result of `getSyncResult()` will be `false` while the server-side job - has not completed, and different to `false` if it has completed. When it has - completed, all job result details will be returned by the call to `getSyncResult()`. - -* the web admin interface dashboard now shows a server's replication status - at the bottom of the page - - -Web Admin Interface -------------------- - -The following improvements have been made for the web admin interface: - -* the AQL editor now has support for bind parameters. The bind parameter values can - be edited in the web interface and saved with a query for future use. - -* the AQL editor now allows canceling running queries. This can be used to cancel - long-running queries without switching to the *query management* section. - -* the dashboard now provides information about the server's replication status at - the bottom of the page. This can be used to track either the status of a one-time - synchronization or the continuous replication. - -* the compaction status and some status internals about collections are now displayed - in the detail view for a collection in the web interface. These data can be used - for debugging compaction issues. - -* unloading a collection via the web interface will now trigger garbage collection - in all v8 contexts and force a WAL flush. This increases the chances of perfoming - the unload faster. - -* the status terminology for collections for which an unload request has been issued - via the web interface was changed from `in the process of being unloaded` to - `will be unloaded`. This is more accurate as the actual unload may be postponed - until later if there are still references pointing to data in the collection. - - -Foxx improvements ------------------ - -* the module resolution used by `require` now behaves more like in node.js - -* the `org/arangodb/request` module now returns response bodies for error responses - by default. The old behavior of not returning bodies for error responses can be - re-enabled by explicitly setting the option `returnBodyOnError` to `false` - - -Miscellaneous changes ---------------------- - -The startup option `--server.hide-product-header` can be used to make the server -not send the HTTP response header `"Server: ArangoDB"` in its HTTP responses. This -can be used to conceal the server make from HTTP clients. -By default, the option is turned off so the header is still sent as usual. - -arangodump and arangorestore now have better error reporting. Additionally, arangodump -will now fail by default when trying to dump edges that refer to already dropped -collections. This can be circumvented by specifying the option `--force true` when -invoking arangodump. - -arangoimp now provides an option `--create-collection-type` to specify the type of -the collection to be created when `--create-collection` is set to `true`. Previously -`--create-collection` always created document collections and the creation of edge -collections was not possible. 
- diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures30.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures30.md deleted file mode 100644 index 128b6f8e168c..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures30.md +++ /dev/null @@ -1,540 +0,0 @@ -Features and Improvements in ArangoDB 3.0 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.0. ArangoDB 3.0 also contains several bugfixes that are not listed -here. - -Internal data format changes ----------------------------- - -ArangoDB now uses [VelocyPack](https://github.com/arangodb/velocypack) for -storing documents, query results and temporarily computed values. Using a single -data format removed the need for some data conversions in the core that slowed -operations down previously. - -The VelocyPack format is also quite compact, and reduces storage space -requirements for "small" values such as boolean, integers, short strings. This -can speed up several operations inside AQL queries. - -VelocyPack document entries stored on disk are also self-contained, in the sense -that each stored document will contain all of its data type and attribute name -descriptions. While this may require a bit more space for storing the documents, -it removes the overhead of fetching attribute names and document layout from -shared structures as in previous versions of ArangoDB. It also simplifies the -code paths for storing and reading documents. - -AQL improvements ----------------- - -### Syntax improvements - -#### `LIKE` string-comparison operator - -AQL now provides a `LIKE` operator and can be used to compare strings like this, -for example inside filter conditions: - -``` -value LIKE search -``` - -This change makes `LIKE` an AQL keyword. Using `LIKE` as an attribute or collection -name in AQL thus requires quoting the name from now on. - -The `LIKE` operator is currently implemented by calling the already existing AQL -function `LIKE`, which also remains operational in 3.0. Use the `LIKE` function -in case you want to search case-insensitive (optional parameter), as the `LIKE` -operator always compares case-sensitive. - -#### AQL array comparison operators - -All AQL comparison operators now also exist in an array variant. In the -array variant, the operator is preceded with one of the keywords *ALL*, *ANY* -or *NONE*. Using one of these keywords changes the operator behavior to -execute the comparison operation for all, any, or none of its left hand -argument values. It is therefore expected that the left hand argument -of an array operator is an array. - -Examples: - -``` -[ 1, 2, 3 ] ALL IN [ 2, 3, 4 ] // false -[ 1, 2, 3 ] ALL IN [ 1, 2, 3 ] // true -[ 1, 2, 3 ] NONE IN [ 3 ] // false -[ 1, 2, 3 ] NONE IN [ 23, 42 ] // true -[ 1, 2, 3 ] ANY IN [ 4, 5, 6 ] // false -[ 1, 2, 3 ] ANY IN [ 1, 42 ] // true -[ 1, 2, 3 ] ANY == 2 // true -[ 1, 2, 3 ] ANY == 4 // false -[ 1, 2, 3 ] ANY > 0 // true -[ 1, 2, 3 ] ANY <= 1 // true -[ 1, 2, 3 ] NONE < 99 // false -[ 1, 2, 3 ] NONE > 10 // true -[ 1, 2, 3 ] ALL > 2 // false -[ 1, 2, 3 ] ALL > 0 // true -[ 1, 2, 3 ] ALL >= 3 // false -["foo", "bar"] ALL != "moo" // true -["foo", "bar"] NONE == "bar" // false -["foo", "bar"] ANY == "foo" // true -``` - -#### Regular expression string-comparison operators - -AQL now supports the operators *=~* and *!~* for testing strings against regular -expressions. 
*=~* tests if a string value matches a regular expression, and *!~* tests -if a string value does not match a regular expression. - -The two operators expect their left-hand operands to be strings, and their right-hand -operands to be strings containing valid regular expressions as specified below. - -The regular expressions may consist of literal characters and the following -characters and sequences: - -- `.` – the dot matches any single character except line terminators. - To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag. -- `\d` – matches a single digit, equivalent to `[0-9]` -- `\s` – matches a single whitespace character -- `\S` – matches a single non-whitespace character -- `\t` – matches a tab character -- `\r` – matches a carriage return -- `\n` – matches a line-feed character -- `[xyz]` – set of characters. matches any of the enclosed characters (i.e. - *x*, *y* or *z* in this case -- `[^xyz]` – negated set of characters. matches any other character than the - enclosed ones (i.e. anything but *x*, *y* or *z* in this case) -- `[x-z]` – range of characters. Matches any of the characters in the - specified range, e.g. `[0-9A-F]` to match any character in - *0123456789ABCDEF* -- `[^x-z]` – negated range of characters. Matches any other character than the - ones specified in the range -- `(xyz)` – defines and matches a pattern group -- `(x|y)` – matches either *x* or *y* -- `^` – matches the beginning of the string (e.g. `^xyz`) -- `$` – matches the end of the string (e.g. `xyz$`) - -Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`, -and `$` have a special meaning in regular expressions and may need to be -escaped using a backslash (`\\`). A literal backslash should also be escaped -using another backslash, i.e. `\\\\`. - -Characters and sequences may optionally be repeated using the following -quantifiers: - -- `x*` – matches zero or more occurrences of *x* -- `x+` – matches one or more occurrences of *x* -- `x?` – matches one or zero occurrences of *x* -- `x{y}` – matches exactly *y* occurrences of *x* -- `x{y,z}` – matches between *y* and *z* occurrences of *x* -- `x{y,}` – matches at least *y* occurrences of *x* - -#### Enclosing identifiers in forward ticks - -AQL identifiers can now optionally be enclosed in forward ticks in addition to using -backward ticks. This allows convenient writing of AQL queries in JavaScript template -strings (which are delimited with backticks themselves), e.g. - -```js -var q = `FOR doc IN ´collection´ RETURN doc.´name´`; -``` - -### Functions added - -The following AQL functions have been added in 3.0: - -- *REGEX_TEST(value, regex)*: tests whether the string *value* matches the regular expression - specified in *regex*. Returns *true* if it matches, and *false* otherwise. - - The syntax for regular expressions is the same as for the regular expression operators - *=~* and *!~*. - -- *HASH(value)*: Calculates a hash value for *value*. *value* is not required to be a - string, but can have any data type. The calculated hash value will take the data type - of *value* into account, so for example the number *1* and the string *"1"* will have - different hash values. For arrays the hash values will be creared if the arrays contain - exactly the same values (including value types) in the same order. For objects the same - hash values will be created if the objects have exactly the same attribute names and - values (including value types). 
The order in which attributes appear inside objects - is not important for hashing. - The hash value returned by this function is a number. The hash algorithm is not guaranteed - to remain the same in future versions of ArangoDB. The hash values should therefore be - used only for temporary calculations, e.g. to compare if two documents are the same, or - for grouping values in queries. - -- *TYPENAME(value)*: Returns the data type name of *value*. The data type name can - be either *null*, *bool*, *number*, *string*, *array* or *object*. - -- *LOG(value)*: Returns the natural logarithm of *value*. The base is Euler's constant - (2.71828...). - -- *LOG2(value)*: Returns the base 2 logarithm of *value*. - -- *LOG10(value)*: Returns the base 10 logarithm of *value*. - -- *EXP(value)*: Returns Euler's constant (2.71828...) raised to the power of *value*. - -- *EXP2(value)*: Returns 2 raised to the power of *value*. - -- *SIN(value)*: Returns the sine of *value*. - -- *COS(value)*: Returns the cosine of *value*. - -- *TAN(value)*: Returns the tangent of *value*. - -- *ASIN(value)*: Returns the arcsine of *value*. - -- *ACOS(value)*: Returns the arccosine of *value*. - -- *ATAN(value)*: Returns the arctangent of *value*. - -- *ATAN2(y, x)*: Returns the arctangent of the quotient of *y* and *x*. - -- *RADIANS(value)*: Returns the angle converted from degrees to radians. - -- *DEGREES(value)*: Returns the angle converted from radians to degrees. - -### Optimizer improvements - -#### "inline-subqueries" rule - -The AQL optimizer rule "inline-subqueries" has been added. This rule can pull -out certain subqueries that are used as an operand to a `FOR` loop one level -higher, eliminating the subquery completely. This reduces complexity of the -query's execution plan and will likely enable further optimizations. For -example, the query - -``` -FOR i IN ( - FOR j IN [1,2,3] - RETURN j - ) - RETURN i -``` - -will be transformed by the rule to: - -``` -FOR i IN [1,2,3] - RETURN i -``` - -The query - -``` -FOR name IN ( - FOR doc IN _users - FILTER doc.status == 1 - RETURN doc.name - ) - LIMIT 2 - RETURN name -``` - -will be transformed into - -``` -FOR tmp IN _users - FILTER tmp.status == 1 - LIMIT 2 - RETURN tmp.name -``` - -The rule will only fire when the subquery is used as an operand to a `FOR` loop, -and if the subquery does not contain a `COLLECT` with an `INTO` variable. - -#### "remove-unnecessary-calculations" rule - -The AQL optimizer rule "remove-unnecessary-calculations" now fires in more cases -than in previous versions. This rule removes calculations from execution plans, -and by having less calculations done, a query may execute faster or requires -less memory. - -The rule will now remove calculations that are used exactly once in other -expressions (e.g. `LET a = doc RETURN a.value`) and calculations, or calculations -that are just references to other variables (e.g. `LET a = b`). - -#### "optimize-traversals" rule - -The AQL optimizer rule "merge-traversal-filter" was renamed to "optimize-traversals". -The rule will remove unused edge and path result variables from the traversal in case -they are specified in the `FOR` section of the traversal, but not referenced later in -the query. This saves constructing edges and paths results that are not used later. - -AQL now uses VelocyPack internally for storing intermediate values. For many value types -it can now get away without extra memory allocations and less internal conversions. 
-Values can be passed into internal AQL functions without copying them. This can lead to -reduced query execution times for queries that use C++-based AQL functions. - -#### "replace-or-with-in" and "use-index-for-sort" rules - -These rules now fire in some additional cases, which allows simplifying index lookup -conditions and removing SortNodes from execution plans. - -Cluster state management ------------------------- - -The cluster's internal state information is now also managed by ArangoDB instances. -Earlier versions relied on third party software being installed for the storing the -cluster state. -The state is managed by dedicated ArangoDB instances, which can be started in a special -*agency* mode. These instances can operate in a distributed fashion. They will -automatically elect one of them to become their leader, being responsibile for storing -the state changes sent from servers in the cluster. The other instances will automatically -follow the leader and will transparently stand in should it become unavailable. -The agency instances are also self-organizing: they will continuously probe each -other and re-elect leaders. The communication between the agency instances use the -consensus-based RAFT protocol. - -The operations for storing and retrieving cluster state information are now much less -expensive from an ArangoDB cluster node perspective, which in turn allows for faster -cluster operations that need to fetch or update the overall cluster state. - -`_from` and `_to` attributes of edges are updatable and usable in indexes -------------------------------------------------------------------------- - -In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated -specially when loading or storing edges. That special handling led to these attributes -being not as flexible as regular document attributes. For example, the `_from` and -`_to` attribute values of an existing edge could not be updated once the edge was -created. Now this is possible via the single-document APIs and via AQL. - -Additionally, the `_from` and `_to` attributes could not be indexed in -user-defined indexes, e.g. to make each combination of `_from` and `_to` unique. -Finally, as `_from` and `_to` referenced the linked collections by collection id -and not by collection name, their meaning became unclear once a referenced collection -was dropped. The collection id stored in edges then became unusable, and when -accessing such edge the collection name part of it was always translated to `_undefined`. - -In ArangoDB 3.0, the `_from` and `_to` values of edges are saved as regular strings. -This allows using `_from` and `_to` in user-defined indexes. Additionally, this allows -to update the `_from` and `_to` values of existing edges. Furthermore, collections -referenced by `_from` and `_to` values may be dropped and re-created later. Any -`_from` and `_to` values of edges pointing to such dropped collection are unaffected -by the drop operation now. - -Unified APIs for CRUD operations --------------------------------- - -The CRUD APIs for documents and edge have been unified. Edges can now be inserted -and modified via the same APIs as documents. `_from` and `_to` attribute values can -be passed as regular document attributes now: - -```js -db.myedges.insert({ _from: "myvertices/some", _to: "myvertices/other", ... 
}); -``` - -Passing `_from` and `_to` separately as it was required in earlier versions is not -necessary anymore but will still work: - -```js -db.myedges.insert("myvertices/some", "myvertices/other", { ... }); -``` - -The CRUD operations now also support batch variants that works on arrays of -documents/edges, e.g. - -```js -db.myedges.insert([ - { _from: "myvertices/some", _to: "myvertices/other", ... }, - { _from: "myvertices/who", _to: "myvertices/friend", ... }, - { _from: "myvertices/one", _to: "myvertices/two", ... }, -]); -``` - -The batch variants are also available in ArangoDB's HTTP API. They can be used to -more efficiently carry out operations with multiple documents than their single-document -equivalents, which required one HTTP request per operation. With the batch operations, -the HTTP request/response overhead can be amortized across multiple operations. - -Persistent indexes ------------------- - -ArangoDB 3.0 provides an experimental persistent index feature. Persistent indexes store -the index values on disk instead of in-memory only. This means the indexes do not need -to be rebuilt in-memory when a collection is loaded or reloaded, which should improve -collection loading times. - -The persistent indexes in ArangoDB are based on the RocksDB engine. -To create a persistent index for a collection, create an index of type "rocksdb" as -follows: - -```js -db.mycollection.ensureIndex({ type: "rocksdb", fields: [ "fieldname" ]}); -``` - -The persistent indexes are sorted, so they allow equality lookups and range queries. -Note that the feature is still highly experimental and has some known deficiencies. It -will be finalized until the release of the 3.0 stable version. - -Upgraded V8 version -------------------- - -The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from -version 4.3.61 to 5.0.71.39. The new version makes several more ES6 features available by -default, including - -- arrow functions -- computed property names -- rest parameters -- array destructuring -- numeric and object literals - -Web Admin Interface -------------------- - -The ArangoDB 3.0 web interface is significantly improved. It now comes with a more -responsive design, making it easier to use on different devices. Navigation and menus -have been simplified, and related items have been regrouped to stay closer together -and allow tighter workflows. - -The AQL query editor is now much easier to use. Multiple queries can be started and tracked -in parallel, while results of earlier queries are still preserved. Queries still running -can be canceled directly from the editor. The AQL query editor now allows the usage of bind -parameters too, and provides a helper for finding collection names, AQL function names and -keywords quickly. - -The web interface now keeps track of whether the server is offline and of which server-side -operations have been started and are still running. It now remains usable while such -longer-running operations are ongoing. It also keeps more state about user's choices (e.g. -windows sizes, whether the tree or the code view was last used in the document editor). - -Cluster statistics are now integrated into the web interface as well. Additionally, a -menu item "Help us" has been added to easily provide the ArangoDB team feedback about -the product. - -The frontend may now be mounted behind a reverse proxy on a different path. For this to work -the proxy should send a X-Script-Name header containing the path. 
- -A backend configuration for haproxy might look like this: - -``` -reqadd X-Script-Name:\ /arangodb -``` - -The frontend will recognize the subpath and produce appropriate links. ArangoDB will only -accept paths from trusted frontend proxies. Trusted proxies may be added on startup: - -``` ---frontend.proxy-request-check true --frontend.trusted-proxy 192.168.1.117 -``` - ---frontend.trusted-proxy may be any address or netmask. - -To disable the check and blindly accept any x-script-name set --frontend.proxy-request-check -to false. - -Foxx improvements ------------------ - -The Foxx framework has been completely rewritten for 3.0 with a new, simpler and -more familiar API. The most notable changes are: - -* Legacy mode for 2.8 services - - Stuck with old code? You can continue using your 2.8-compatible Foxx services with - 3.0 by adding `"engines": {"arangodb": "^2.8.0"}` (or similar version ranges that - exclude 3.0 and up) to the service manifest. - -* No more global variables and magical comments - - The `applicationContext` is now `module.context`. Instead of magical comments just - use the `summary` and `description` methods to document your routes. - -* Repository and Model have been removed - - Instead of repositories just use ArangoDB collections directly. For validation simply - use the joi schemas (but wrapped in `joi.object()`) that previously lived inside the - model. Collections and queries return plain JavaScript objects. - -* Controllers have been replaced with nestable routers - - Create routers with `require('@arangodb/foxx/router')()`, attach them to your service - with `module.context.use(router)`. Because routers are no longer mounted automagically, - you can export and import them like any other object. Use `router.use('/path', subRouter)` - to nest routers as deeply as you want. - -* Routes can be named and reversed - - No more memorizing URLs: add a name to your route like - `router.get('/hello/:name', function () {...}, 'hello')` and redirect to the full URL - with `res.redirect(req.resolve('hello', {name: 'world'}))`. - -* Simpler express-like middleware - - If you already know express, this should be familiar. Here's a request logger in - three lines of code: - - ```js - router.use(function (req, res, next) { - var start = Date.now(); - try {next();} - finally {console.log(`${req.method} ${req.url} ${res.statusCode} ${Date.now() - start}ms`);} - }); - ``` - -* Sessions and auth without dependencies - - To make it easier to get started, the functionality previously provided by the - `simple-auth`, `oauth2`, `sessions-local` and `sessions-jwt` services have been moved - into Foxx as the `@arangodb/foxx/auth`, `@arangodb/foxx/oauth2` and `@arangodb/foxx/sessions` - modules. - -Logging -------- - -ArangoDB's logging is now grouped into topics. The log verbosity and output files can -be adjusted per log topic. For example - -``` ---log.level startup=trace --log.level queries=trace --log.level info -``` - -will log messages concerning startup at trace level, AQL queries at trace level and -everything else at info level. `--log.level` can be specified multiple times at startup, -for as many topics as needed. 
- -Some relevant log topics available in 3.0 are: - -- *collector*: information about the WAL collector's state -- *compactor*: information about the collection datafile compactor -- *datafiles*: datafile-related operations -- *mmap*: information about memory-mapping operations (including msync) -- *queries*: executed AQL queries, slow queries -- *replication*: replication-related info -- *requests*: HTTP requests -- *startup*: information about server startup and shutdown -- *threads*: information about threads - -This also allows directing log output to different files based on topics. For -example, to log all AQL queries to a file "queries.log" one can use the options: - -``` ---log.level queries=trace --log.output queries=file:///path/to/queries.log -``` - -To additionally log HTTP request to a file named "requests.log" add the options: - -``` ---log.level requests=info --log.output requests=file:///path/to/requests.log -``` - -Build system ------------- - -ArangoDB now uses the cross-platform build system CMake for all its builds. -Previous versions used two different build systems, making development and -contributions harder than necessary. Now the build system is unified, and -all targets (Linux, Windows, macOS) are built from the same set of build -instructions. - -Documentation -------------- - -The documentation has been enhanced and re-organized to be more intuitive. - -A new introduction for beginners should bring you up to speed with ArangoDB -in less than an hour. Additional topics have been introduced and will be -extended with upcoming releases. - -The topics AQL and HTTP API are now separated from the manual for better -searchability and less confusion. A version switcher makes it easier to -jump to the version of the docs you are interested in. diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures31.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures31.md deleted file mode 100644 index 23b054c2fffc..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures31.md +++ /dev/null @@ -1,225 +0,0 @@ -Features and Improvements in ArangoDB 3.1 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.1. ArangoDB 3.1 also contains several bugfixes that are not listed -here. - -SmartGraphs ------------ - -ArangoDB 3.1 adds a first major Enterprise Edition feature called SmartGraphs. -SmartGraphs form an addition to the already existing graph features and allow to -scale graphs beyond a single machine while keeping almost the same query performance. -The SmartGraph feature is suggested for all graph database use cases that require -a cluster of database servers for what ever reason. -You can either have a graph that is too large to be stored on a single machine only. -Or you can have a small graph, but at the same time need additional data with has to be -sharded and you want to keep all of them in the same environment. -Or you simply use the cluster for high-availability. -In all the above cases SmartGraphs will significantly increase the performance of -graph operations. -For more detailed information read the [SmartGraphs section](../Graphs/SmartGraphs/index.html). - -Data format ------------ - -The format of the revision values stored in the `_rev` attribute of documents -has been changed in 3.1. Up to 3.0 they were strings containing largish decimal numbers. 
With 3.1, revision values are still strings, but are actually encoded time stamps of the creation date of the revision of the document. The time stamps are acquired using a hybrid logical clock (HLC) on the DBserver that holds the -revision (for the concept of a hybrid logical clock see -[this paper](http://www.cse.buffalo.edu/tech-reports/2014-04.pdf)). -See [this manual section](../DataModeling/Documents/DocumentAddress.html#document-revision) for details. - -ArangoDB >= 3.1 can ArangoDB 3.0 database directories and will simply continue -to use the old `_rev` attribute values. New revisions will be written with -the new time stamps. - -It is highly recommended to backup all your data before loading a database -directory that was written by ArangoDB <= 3.0 into an ArangoDB >= 3.1. - -Communication Layer -------------------- - -ArangoDB up to 3.0 used [libev](http://software.schmorp.de/pkg/libev.html) for -the communication layer. ArangoDB starting from 3.1 uses -[Boost ASIO](https://www.boost.org). - -Starting with ArangoDB 3.1 we begin to provide the VelocyStream Protocol (vst) as -a addition to the established http protocol. - -A few options have changed concerning communication, please checkout -[Incompatible changes in 3.1](./UpgradingChanges31.html). - -Cluster -------- - -For its internal cluster communication a (bundled version) of curl is now being -used. This enables asynchronous operation throughout the cluster and should -improve general performance slightly. - -Authentication is now supported within the cluster. - - -Document revisions cache ------------------------- - -The ArangoDB server now provides an in-memory cache for frequently accessed -document revisions. Documents that are accessed during read/write operations -are loaded into the revisions cache automatically, and subsequently served from -there. - -The cache has a total target size, which can be controlled with the startup -option `--database.revision-cache-target-size`. Once the cache reaches the -target size, older entries may be evicted from the cache to free memory. Note that -the target size currently is a high water mark that will trigger cache memory -garbage collection if exceeded. However, if all cache chunks are still in use -when the high water mark is reached, the cache may still grow and allocate more -chunks until cache entries become unused and are allowed to be garbage-collected. - -The cache is maintained on a per-collection basis, that is, memory for the cache -is allocated on a per-collection basis in chunks. The size for the cache memory -chunks can be controlled via the startup option `--database.revision-cache-chunk-size`. -The default value is 4 MB per chunk. -Bigger chunk sizes allow saving more documents per chunk, which can lead to more -efficient chunk allocation and lookups, but will also lead to memory waste if many -chunks are allocated and not fully used. The latter will be the case if there exist -many small collections which all allocate their own chunks but not fully utilize them -because of the low number of documents. - - -AQL ---- - -### Functions added - -The following AQL functions have been added in 3.1: - -- *OUTERSECTION(array1, array2, ..., arrayn)*: returns the values that occur - only once across all arrays specified. - -- *DISTANCE(lat1, lon1, lat2, lon2)*: returns the distance between the two - coordinates specified by *(lat1, lon1)* and *(lat2, lon2)*. The distance is - calculated using the haversine formula. 
- -- *JSON_STRINGIFY(value)*: returns a JSON string representation of the value. - -- *JSON_PARSE(value)*: converts a JSON-encoded string into a regular object - - -### Index usage in traversals - -3.1 allows AQL traversals to use other indexes than just the edge index. -Traversals with filters on edges can now make use of more specific indexes. For -example, the query - - FOR v, e, p IN 2 OUTBOUND @start @@edge - FILTER p.edges[0].foo == "bar" - RETURN [v, e, p] - -may use a hash index on `["_from", "foo"]` instead of the edge index on just -`["_from"]`. - - -### Optimizer improvements - -Make the AQL query optimizer inject filter condition expressions referred to -by variables during filter condition aggregation. For example, in the following -query - - FOR doc IN collection - LET cond1 = (doc.value == 1) - LET cond2 = (doc.value == 2) - FILTER cond1 || cond2 - RETURN { doc, cond1, cond2 } - -the optimizer will now inject the conditions for `cond1` and `cond2` into the -filter condition `cond1 || cond2`, expanding it to `(doc.value == 1) || (doc.value == 2)` -and making these conditions available for index searching. - -Note that the optimizer previously already injected some conditions into other -conditions, but only if the variable that defined the condition was not used elsewhere. -For example, the filter condition in the query - - FOR doc IN collection - LET cond = (doc.value == 1) - FILTER cond - RETURN { doc } - -already got optimized before because `cond` was only used once in the query and the -optimizer decided to inject it into the place where it was used. - -This only worked for variables that were referred to once in the query. When a variable -was used multiple times, the condition was not injected as in the following query - - FOR doc IN collection - LET cond = (doc.value == 1) - FILTER cond - RETURN { doc, cond } - -3.1 allows using this condition so that the query can use an index on `doc.value` -(if such index exists). - - -### Miscellaneous improvements - -The performance of the `[*]` operator was improved for cases in which this operator -did not use any filters, projections and/or offset/limits. - -The AQL query executor can now report the time required for loading and locking the -collections used in an AQL query. When profiling is enabled, it will report the total -loading and locking time for the query in the `loading collections` sub-attribute of the -`extra.profile` value of the result. The loading and locking time can also be view in the -AQL query editor in the web interface. - -Audit Log ---------- - -Audit logging has been added, see [Auditing](../Security/Auditing/README.md). - -Client tools ------------- - -Added option `--skip-lines` for arangoimp -This allows skipping the first few lines from the import file in case the CSV or TSV -import are used and some initial lines should be skipped from the input. - -Web Admin Interface -------------------- - -The usability of the AQL editor significantly improved. In addition to the standard JSON -output, the AQL Editor is now able to render query results as a graph preview or a table. -Furthermore the AQL editor displays query profiling information. - -Added a new Graph Viewer in order to exchange the technically obsolete version. The new Graph -Viewer is based on Canvas but does also include a first WebGL implementation (limited -functionality - will change in the future). The new Graph Viewer offers a smooth way to -discover and visualize your graphs. 
- -The shard view in cluster mode now displays a progress indicator while moving shards. - -Authentication --------------- - -Up to ArangoDB 3.0 authentication of client requests was only possible with HTTP basic -authentication. - -Starting with 3.1 it is now possible to also use a [JSON Web Tokens](https://jwt.io/) -(JWT) for authenticating incoming requests. - -For details check the HTTP authentication chapter. Both authentication methods are -valid and will be supported in the near future. Use whatever suits you best. - -Foxx ----- - -### GraphQL - -It is now easy to get started with providing GraphQL APIs in Foxx, see [Foxx GraphQL](../Foxx/Reference/Modules/GraphQL.md). - -### OAuth2 - -Foxx now officially provides a module for implementing OAuth2 clients, see [Foxx OAuth2](../Foxx/Reference/Modules/OAuth2.md). - -### Per-route middleware - -It's now possible to specify middleware functions for a route when defining a route handler. These middleware functions only apply to the single route and share the route's parameter definitions. Check out the [Foxx Router documentation](../Foxx/Reference/Routers/README.md) for more information. diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures32.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures32.md deleted file mode 100644 index 9db01b6691e8..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures32.md +++ /dev/null @@ -1,400 +0,0 @@ -Features and Improvements in ArangoDB 3.2 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.2. ArangoDB 3.2 also contains several bugfixes that are not listed -here. - - -Storage engines ---------------- - -ArangoDB 3.2 offers two storage engines: - -* the always-existing memory-mapped files storage engine -* a new storage engine based on [RocksDB](https://www.github.com/facebook/rocksdb/) - -### Memory-mapped files storage engine (MMFiles) - -The former storage engine (named MMFiles engine henceforth) persists data in memory-mapped -files. - -Any data changes are done first in the engine's write-ahead log (WAL). The WAL -is replayed after a crash so the engine offers durability and crash-safety. Data -from the WAL is eventually moved to collection-specific datafiles. The files are -always written in an append-only fashion, so data in files is never overwritten. -Obsolete data in files will eventually be purged by background compaction threads. - -Most of this engine's indexes are built in RAM. When a collection is loaded, this requires -rebuilding the indexes in RAM from the data stored on disk. The MMFiles engine has -collection-level locking. - -This storage engine is a good choice when data (including the indexes) can fit in the -server's available RAM. If the size of data plus the in-memory indexes exceeds the size -of the available RAM, then this engine may try to allocate more memory than available. -This will either make the operating system swap out parts of the data (and cause disk I/O) -or, when no swap space is configured, invoke the operating system's out-of-memory process -killer. - -The locking strategy allows parallel reads and is often good enough in read-mostly -workloads. Writes need exclusive locks on the collections, so they can block other -operations in the same collection. The locking strategy also provides transactional consistency -and isolation. - -### RocksDB storage engine - -The RocksDB storage engine is new in ArangoDB 3.2. 
It is designed to store datasets -that are bigger than the server's available RAM. It persists all data (including the -indexes) in a RocksDB instance. - -That means any document read or write operations will be answered by RocksDB under the -hood. RocksDB will serve the data from its own in-RAM caches or from disk. -The RocksDB engine has a write-ahead log (WAL) and uses background threads for compaction. -It supports data compression. - -The RocksDB storage engine has document-level locking. Read operations do not block and -are never blocked by other operations. Write operations only block writes on the same -documents/index values. Because multiple writers can operate in parallel on the same -collection, there is the possibility of write-write conflicts. If such write conflict -is detected, one of the write operations is aborted with error 1200 ("conflict"). -Client applications can then either abort the operation or retry, based on the required -consistency semantics. - -### Storage engine selection - -The storage engine to use in an ArangoDB cluster or a single-server instance must be -selected initially. The default storage engine in ArangoDB 3.2 is the MMFiles engine if -no storage engine is selected explicitly. This ensures all users upgrading from earlier -versions can continue with the well-known MMFiles engine. - -To select the storage-engine, there is the configuration option `--server.storage-engine`. -It can be set to either `mmfiles`, `rocksdb` or `auto`. While the first two values will -explicitly select a storage engine, the `auto` option will automatically choose the -storage engine based on which storage engine was previously selected. If no engine was -selected previously, `auto` will select the MMFiles engine. If an engine was previously -selected, the selection will be written to a file `ENGINE` in the server's database -directory and will be read from there at any subsequent server starts. - -Once the storage engine was selected, the selection cannot be changed by adjusting -`--server.storage-engine`. In order to switch to another storage engine, it is required -to re-start the server with another (empty) database directory. In order to use data -created with the other storage engine, it is required to dump the data first with the -old engine and restore it using the new storage engine. This can be achieved via -invoking arangodump and arangorestore. - -Unlike in MySQL, the storage engine selection in ArangoDB is for an entire cluster or -an entire single-server instance. All databases and collections will use the same storage -engine. - -### RocksDB storage engine: supported index types - -The existing indexes in the RocksDB engine are all persistent. The following indexes are -supported there: - -* primary: this type of index is automatically created. It indexes `_id` / `_key` - -* edge: this index is automatically created for edge collections. It indexes - `_from` and `_to` - -* hash, skiplist, persistent: these are user-defined indexes, Despite their names, they are - neither hash nor skiplist indexes. These index types map to the same RocksDB-based - sorted index implementation. The same is true for the "persistent" index. The names - "hash", "skiplist" and "persistent" are only used for compatibility with the MMFiles - engine where these indexes existed in previous and the current version of ArangoDB. 

* geo: user-defined index for proximity searches

* fulltext: user-defined sorted inverted index on words occurring in documents

Satellite Collections
---------------------

With SatelliteCollections, you can define collections to shard to a cluster and
collections to replicate to each machine. The ArangoDB query optimizer knows where
each shard is located and sends the requests to the DBServers involved, which then
execute the query locally. With this approach, network hops during join
operations on sharded collections can be avoided and response times can be close to
that of a single instance.

[Satellite collections](../Satellites.md)
are available in the *Enterprise Edition*.


Memory management
-----------------

* make arangod start with fewer V8 JavaScript contexts

  This speeds up the server start and makes arangod use less memory at start.
  Whenever a V8 context is needed by a Foxx action or some other JavaScript operation
  and there is no usable V8 context, a new context will be created dynamically now.

  Up to `--javascript.v8-contexts` V8 contexts will be created, so this option
  changes its meaning. Previously as many V8 contexts as specified by this
  option were created at server start, and the number of V8 contexts did not
  change at runtime. Now up to this number of V8 contexts will be in use at the
  same time, but the actual number of V8 contexts is dynamic.

  The garbage collector thread will automatically delete unused V8 contexts after
  a while. The number of spare contexts will go down to as few as configured in
  the new option `--javascript.v8-contexts-minimum`. This many V8 contexts
  are also created at server start.

  The first few requests in new V8 contexts may take longer than in contexts
  that have been there already. Performance may therefore suffer a bit for the
  initial requests sent to ArangoDB or when there are only few but performance-
  critical situations in which new V8 contexts need to be created. If this is a
  concern, it can easily be fixed by setting `--javascript.v8-contexts-minimum`
  and `--javascript.v8-contexts` to a relatively high value, which will guarantee
  that this many V8 contexts are created at startup and kept around even
  when unused (see the example invocation at the end of this section).

  Waiting for an unused V8 context will now also abort and write a log message
  in case no V8 context can be acquired/created after 60 seconds.

* the number of pending operations in arangod can now be limited to a configurable
  number. If this number is exceeded, the server will now respond with HTTP 503
  (service unavailable). The maximum size of pending operations is controlled via
  the startup option `--server.maximal-queue-size`. Setting it to 0 means "no limit".

* the in-memory document revisions cache was removed entirely because it did not
  provide the expected benefits. The 3.1 implementation shadowed document data in
  RAM, which increased the server's RAM usage but did not speed up document lookups
  much.

  This also obsoletes the startup options `--database.revision-cache-chunk-size` and
  `--database.revision-cache-target-size`.

  The MMFiles engine now does not use a document revisions cache but has in-memory
  indexes and maps documents to RAM automatically via mmap when documents are
  accessed. The RocksDB engine has its own mechanism for caching accessed documents.
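To make these options concrete, here is a minimal, illustrative server invocation combining the settings discussed above (the option names are the ones introduced in this section; the numeric values are arbitrary examples, not recommendations):

    arangod --javascript.v8-contexts 16 \
            --javascript.v8-contexts-minimum 4 \
            --server.maximal-queue-size 4096

With such a configuration, at most 16 V8 contexts will be in use at the same time, at least 4 spare contexts will be kept around by the garbage collector, and the server will respond with HTTP 503 once more than 4096 operations are pending.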


Communication Layer
-------------------

* HTTP responses returned by arangod will now include the extra HTTP header
  `x-content-type-options: nosniff` to work around a cross-site scripting bug
  in MSIE

* the default value for `--ssl.protocol` was changed from TLSv1 to TLSv1.2.
  When not explicitly set, arangod and all client tools will now use TLSv1.2.

* the JSON data in all incoming HTTP requests is now validated for duplicate
  attribute names.

  Incoming JSON data with duplicate attribute names will now be rejected as
  invalid. Previous versions of ArangoDB only validated the uniqueness of
  attribute names inside incoming JSON for some API endpoints, but not
  consistently for all APIs.

* Internal JavaScript REST actions will no longer expose their stack traces to
  the client in HTTP responses. Instead, stack traces will always be written to
  the logfile.


JavaScript
----------

* updated V8 version to 5.7.0.0

* change undocumented behavior in case of invalid revision ids in
  `If-Match` and `If-None-Match` headers from 400 (BAD) to 412 (PRECONDITION
  FAILED).

* change default string truncation length from 80 characters to 256 characters for
  `print`/`printShell` functions in ArangoShell and arangod. This will emit longer
  prefixes of string values before truncating them with `...`, which is helpful
  for debugging. This change is mostly useful when using the ArangoShell (arangosh).

* the `@arangodb` module now provides a `time` function which returns the current time
  in seconds as a floating point value with microsecond precision.


Foxx
----

* There is now an [official HTTP API for managing services](../../HTTP/Foxx/index.html),
  allowing services to be installed, modified, uninstalled and reconfigured without
  the administrative web interface.

* It is now possible to upload a single JavaScript file instead of a zip archive
  if your service requires no configuration, additional files or setup.
  A minimal manifest will be generated automatically upon installation and the
  uploaded file will be used as the service's main entry point.


Distributed Graph Processing
----------------------------

* We added support for executing distributed graph algorithms aka `Pregel`.
* Users can run arbitrary algorithms on an entire graph, including in cluster mode.
* We implemented a number of algorithms for various well-known graph measures:
  * Connected Components
  * PageRank
  * Shortest Paths
  * Centrality Measures (Centrality and Betweenness)
  * Community Detection (via Label Propagation, Speaker-Listener Label Propagation or DMID)
* Users can contribute their own algorithms

AQL
---

### Optimizer improvements

* Geo indexes are now implicitly and automatically used when using appropriate SORT/FILTER
  statements in AQL, without the need to use the somewhat limited special-purpose geo AQL
  functions `NEAR` or `WITHIN`.

  Compared to using the special purpose AQL functions this approach has the
  advantage that it is more composable, and will also honor any `LIMIT` values
  used in the AQL query.

  The special purpose `NEAR` AQL function can now be substituted with the
  following AQL (provided there is a geo index present on the `doc.latitude`
  and `doc.longitude` attributes):

      FOR doc in geoSort
        SORT DISTANCE(doc.latitude, doc.longitude, 0, 0)
        LIMIT 5
        RETURN doc

  `WITHIN` can be substituted with the following AQL:

      FOR doc in geoFilter
        FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000
        RETURN doc


### Miscellaneous improvements

* added `REGEX_REPLACE` AQL function

  `REGEX_REPLACE(text, search, replacement, caseInsensitive) → string`

  Replace the pattern *search* with the string *replacement* in the string
  *text*, using regular expression matching.

  - **text** (string): the string to search in
  - **search** (string): a regular expression search pattern
  - **replacement** (string): the string to replace the *search* pattern with
  - returns **string** (string): the string *text* with the *search* regex
    pattern replaced with the *replacement* string wherever the pattern exists
    in *text*

* added new startup option `--query.fail-on-warning` to make AQL queries
  abort instead of continuing with warnings.

  When set to *true*, this will make an AQL query throw an exception and
  abort in case a warning occurs. This option should be used in development to catch
  errors early. If set to *false*, warnings will not be propagated to exceptions and
  will be returned with the query results. The startup option can also be overridden
  on a per-query level.

* the slow query list now contains the values of bind variables used in the
  slow queries. Bind variables are also provided for the currently running
  queries. This helps debugging slow or blocking queries that use dynamic
  collection names via bind parameters.

* AQL breaking change in cluster:
  Using the SHORTEST_PATH statement with edge collection names instead
  of a graph name now requires the vertex collections to be named explicitly
  within the AQL query when running in a cluster. This can be done by adding
  `WITH <vertex collections>` at the beginning of the query.

  Example:
  ```
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  Now has to be:

  ```
  WITH vertices
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  This change is necessary to avoid deadlock situations in the cluster case.
  An error message stating the above requirement is raised otherwise.


Client tools
------------

* added data export tool, arangoexport.

  arangoexport can be used to export collections to json, jsonl or xml
  and export a graph or collections to xgmml (see the example invocation
  after this list).

* added "jsonl" as input file type for arangoimp

* added `--translate` option for arangoimp to translate attribute names from
  the input files to attribute names expected by ArangoDB

  The `--translate` option can be specified multiple times (once per translation
  to be executed). The following example renames the "id" column from the input
  file to "_key", and the "from" column to "_from", and the "to" column to "_to":

      arangoimp --type csv --file data.csv --translate "id=_key" --translate "from=_from" --translate "to=_to"

  `--translate` works for CSV and TSV inputs only.

* added `--threads` option to arangoimp to specify the number of parallel import threads

* changed default value for client tools option `--server.max-packet-size` from 128 MB
  to 256 MB. This allows transferring bigger result sets from the server without the
  client tools rejecting them as invalid.
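To give a flavor of the new export tool, a typical invocation that writes a single collection to JSONL might look like the following sketch (the collection name and output directory are made up for the example; consult `arangoexport --help` for the authoritative option list):

    arangoexport --type jsonl --collection users --output-directory "export"

The same tool can be pointed at several collections by repeating the collection option, or at a named graph for XGMML output.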
- - -Authentication --------------- - -* added [LDAP](../Programs/Arangod/Ldap.md) authentication (Enterprise Edition only) - - -Authorization --------------- - -* added read only mode for users -* collection level authorization rights - -Read more in the [overview](../Administration/ManagingUsers/README.md). - - -Foxx and authorization ----------------------- - -* the [cookie session transport](../Foxx/Reference/Sessions/Transports/Cookie.md) now supports - all options supported by the [cookie method of the response object](../Foxx/Reference/Routers/Response.md#cookie). - -* it's now possible to provide your own version of the `graphql-sync` module when using the [GraphQL extensions for Foxx](../Foxx/Reference/Modules/GraphQL.md) by passing a copy of the module using the new _graphql_ option. - -* custom API endpoints can now be tagged using the [tag method](../Foxx/Reference/Routers/Endpoints.md#tag) to generate a cleaner Swagger documentation. - - -Miscellaneous Changes ---------------------- - -* arangod now validates several OS/environment settings on startup and warns if - the settings are non-ideal. It additionally will print out ways to remedy the - options. - - Most of the checks are executed on Linux systems only. - -* added "deduplicate" attribute for array indexes, which controls whether inserting - duplicate index values from the same document into a unique array index will lead to - an error or not: - - // with deduplicate = true, which is the default value: - db._create("test"); - db.test.ensureIndex({ type: "hash", fields: ["tags[*]"], deduplicate: true }); - db.test.insert({ tags: ["a", "b"] }); - db.test.insert({ tags: ["c", "d", "c"] }); // will work, because deduplicate = true - db.test.insert({ tags: ["a"] }); // will fail - - // with deduplicate = false - db._create("test"); - db.test.ensureIndex({ type: "hash", fields: ["tags[*]"], deduplicate: false }); - db.test.insert({ tags: ["a", "b"] }); - db.test.insert({ tags: ["c", "d", "c"] }); // will not work, because deduplicate = false - db.test.insert({ tags: ["a"] }); // will fail diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures33.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures33.md deleted file mode 100644 index 7e9935d30bd2..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures33.md +++ /dev/null @@ -1,303 +0,0 @@ -Features and Improvements in ArangoDB 3.3 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.3. ArangoDB 3.3 also contains several bugfixes that are not listed -here. - -Datacenter-to-datacenter replication (DC2DC) --------------------------------------------- - -Every company needs a disaster recovery plan for all important systems. -This is true from small units like single processes running in some -container to the largest distributed architectures. For databases in -particular this usually involves a mixture of fault-tolerance, -redundancy, regular backups and emergency plans. The larger a -data store, the more difficult it is to come up with a good strategy. - -Therefore, it is desirable to be able to run a distributed database -in one data-center and replicate all transactions to another -data-center in some way. Often, transaction logs are shipped -over the network to replicate everything in another, identical -system in the other data-center. 
Some distributed data stores have -built-in support for multiple data-center awareness and can -replicate between data-centers in a fully automatic fashion. - -ArangoDB 3.3 takes an evolutionary step forward by introducing -multi-data-center support, which is asynchronous data-center to -data-center replication. Our solution is asynchronous and scales -to arbitrary cluster sizes, provided your network link between -the data-centers has enough bandwidth. It is fault-tolerant -without a single point of failure and includes a lot of -metrics for monitoring in a production scenario. - -[DC2DC](../Deployment/DC2DC/README.md) is available in the *Enterprise Edition*. - -Encrypted backups ------------------ - -Arangodump can now create encrypted backups using AES256 for encryption. -The encryption key can be read from a file or from a generator program. -It works in single server and cluster mode. - -Example for non-encrypted backup (everyone with access to the backup will be -able to read it): - - arangodump --collection "secret" dump - -In order to create an encrypted backup, add the `--encryption.keyfile` -option when invoking arangodump: - - arangodump --collection "secret" dump --encryption.keyfile ~/SECRET-KEY - -The key must be exactly 32 bytes long (required by the AES block cipher). - -Note that arangodump will not store the key anywhere. It is the responsibility -of the user to find a safe place for the key. However, arangodump will store -the used encryption method in a file named `ENCRYPTION` in the dump directory. -That way arangorestore can later find out whether it is dealing with an -encrypted dump or not. - -Trying to restore the encrypted dump without specifying the key will fail: - - arangorestore --collection "secret-collection" dump --create-collection true - -arangorestore will complain with: - -``` -the dump data seems to be encrypted with aes-256-ctr, but no key information was specified to decrypt the dump -it is recommended to specify either `--encryption.keyfile` or `--encryption.key-generator` when invoking arangorestore with an encrypted dump -``` - -It is required to use the exact same key when restoring the data. Again this is -done by providing the `--encryption.keyfile` parameter: - - arangorestore --collection "secret-collection" dump --create-collection true --encryption.keyfile ~/SECRET-KEY - -Using a different key will lead to the backup being non-recoverable. - -Note that encrypted backups can be used together with the already existing -RocksDB encryption-at-rest feature, but they can also be used for the MMFiles -engine, which does not have encryption-at-rest. - -[Encrypted backups](../Programs/Arangodump/Examples.md#encryption) are available -in the *Enterprise Edition*. - -Server-level replication ------------------------- - -ArangoDB supports asynchronous replication functionality since version 1.4, but -replicating from a master server with multiple databases required manual setup -on the slave for each individual database to replicate. When a new database was -created on the master, one needed to take action on the slave to ensure that data -for that database got actually replicated. Replication on the slave also was not -aware of when a database was dropped on the master. - -3.3 adds [server-level replication](../Administration/MasterSlave/ServerLevelSetup.md), -which will replicate the current and future databases from the master to the -slave automatically after the initial setup. 
- -In order to set up global replication on a 3.3 slave for all databases of a given -3.3 master, there is now the so-called `globalApplier`. It has the same interface -as the existing `applier`, but it will replicate from all databases of the -master and not just a single one. - -In order to start the replication on the slave and make it replicate all -databases from a given master, use these commands on the slave: - -```js -var replication = require("@arangodb/replication"); - -replication.setupReplicationGlobal({ - endpoint: "tcp://127.0.0.1:8529", - username: "root", - password: "", - autoStart: true -}); -``` - -To check if the applier is running, also use the `globalApplier` object: - -```js -replication.globalApplier.state().state -``` - -The server-level replication requires both the master and slave servers to -ArangoDB version 3.3 or higher. - -Asynchronous failover ---------------------- - -A resilient setup can now easily be achieved by running a pair of connected servers, -of which one instance becomes the master and the other an asynchronously replicating -slave, with automatic failover between them. - -Two servers are connected via asynchronous replication. One of the servers is -elected leader, and the other one is made a follower automatically. At startup, -the two servers fight for leadership. The follower will automatically start -replication from the master for all available databases, using the server-level -replication introduced in 3.3. - -When the master goes down, this is automatically detected by an agency -instance, which is also started in this mode. This instance will make the -previous follower stop its replication and make it the new leader. - -The follower will automatically deny all read and write requests from client -applications. Only the replication itself is allowed to access the follower's data -until the follower becomes a new leader. - -When sending a request to read or write data on a follower, the follower will -always respond with `HTTP 503 (Service unavailable)` and provide the address of -the current leader. Client applications and drivers can use this information to -then make a follow-up request to the proper leader: - -``` -HTTP/1.1 503 Service Unavailable -X-Arango-Endpoint: http://[::1]:8531 -.... -``` - -Client applications can also detect who the current leader and the followers -are by calling the `/_api/cluster/endpoints` REST API. This API is accessible -on leaders and followers alike. - -The ArangoDB starter supports starting two servers with asynchronous -replication and failover out of the box. - -The arangojs driver for JavaScript, the Go driver and the Java driver for -ArangoDB support automatic failover in case the currently accessed server endpoint -responds with HTTP 503. - -Blog article: -[Introducing the new ArangoDB Java driver with load balancing and advanced fallback](https://www.arangodb.com/2017/12/introducing-the-new-arangodb-java-driver-load-balancing/) - -RocksDB throttling ------------------- - -ArangoDB 3.3 allows write operations to the RocksDB engine be throttled, in -order to prevent longer write stalls. The throttling is adaptive, meaning that it -automatically adapts to the actual write rate. This results in much more stable -response times, which is better for client applications and cluster health -tests, because timeouts caused by write stalls are less likely to occur and -the server thus not mistakenly assumed to be down. 

Blog article:
[RocksDB smoothing for ArangoDB customers](https://www.arangodb.com/2017/11/rocksdb-smoothing-arangodb-customers/)

Faster shard creation in cluster
--------------------------------

When using a cluster, one normally wants resilience, so `replicationFactor`
is set to at least `2`. The number of shards is often set to rather high values
when creating collections.

Creating a collection in the cluster will make the coordinator store the setup
metadata of the new collection in the agency first. Subsequently all database
servers of the cluster will detect that there is work to do and will begin creating
the shards. This will first happen for the shard leaders. For each shard leader
that finishes with the setup, the synchronous replication with its followers is
then established. That will make sure that every future data modification will not
become effective on the leader only, but also on all the followers.

In 3.3 this setup protocol has been given some shortcuts for the initial shard creation,
which speeds up collection creation by roughly 50 to 60 percent.

LDAP authentication
-------------------

The LDAP authentication module in the *Enterprise Edition* has been enhanced.
The following options have been added to it:

- the option `--server.local-authentication` controls whether the local *_users*
  collection is also used for looking up users. This is also the default behavior.
  If the authentication shall be restricted to just the LDAP directory, the
  option can be set to *false*, and arangod will then not make any queries to its
  *_users* collection when looking up users.

- the option `--server.authentication-timeout` controls the expiration time for
  cached LDAP user information entries in arangod.

- basic role support has been added for the LDAP module in the *Enterprise Edition*.
  New configuration options for LDAP in 3.3 are:

  - `--ldap.roles-attribute-name`
  - `--ldap.roles-transformation`
  - `--ldap.roles-search`
  - `--ldap.roles-include`
  - `--ldap.roles-exclude`
  - `--ldap.superuser-role`

  Please refer to [LDAP](../Programs/Arangod/Ldap.md) for a detailed
  explanation.


Miscellaneous features
----------------------

- when creating a collection in the cluster, there is now an optional
  parameter `enforceReplicationFactor`: when set, this parameter
  enforces that the collection will only be created if there are
  enough database servers available for the desired `replicationFactor`.

- AQL DISTINCT no longer changes the order of previous (sorted) results

  Previously the implementation of AQL DISTINCT stored all encountered values
  in a hash table internally. When done, the final results were returned in the
  order dictated by the hash table that was used to store the keys. This order
  was more or less unpredictable. Though this was documented behavior, it was
  inconvenient for end users.

  3.3 does not change the sort order of the result anymore when DISTINCT
  is used.

- Several AQL functions have been implemented in C++, which can help save
  memory and CPU time for converting the function arguments and results.
  The following functions have been ported:

  - LEFT
  - RIGHT
  - SUBSTRING
  - TRIM
  - MATCHES

- The ArangoShell prompt substitution characters have been extended.
Now the - following extra substitutions can be used for the arangosh prompt: - - - '%t': current time as timestamp - - '%a': elpased time since ArangoShell start in seconds - - '%p': duration of last command in seconds - - For example, to show the execution time of the last command executed in arangosh - in the shell's prompt, start arangosh using: - - arangosh --console.prompt "%E@%d %p> " - -- There are new startup options for the logging to aid debugging and error reporting: - - - `--log.role`: will show one-letter code of server role (A = agent, C = coordinator, ...) - This is especially useful when aggregating logs. - - The existing roles used in logs are: - - - U: undefined/unclear (used at startup) - - S: single server - - C: coordinator - - P: primary - - A: agent - - - `--log.line-number true`: this option will now additionally show the name of the C++ - function that triggered the log message (file name and line number were already logged - in previous versions) - - - `--log.thread-name true`: this new option will log the name of the ArangoDB thread that - triggered the log message. Will have meaningful output on Linux only - -- make the ArangoShell (arangosh) refill its collection cache when a yet-unknown collection - is first accessed. This fixes the following problem when working with the shell while - in another shell or by another process a new collection is added: - - arangosh1> db._collections(); // shell1 lists all collections - arangosh2> db._create("test"); // shell2 now creates a new collection 'test' - arangosh1> db.test.insert({}); // shell1 is not aware of the collection created - // in shell2, so the insert will fail - diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md deleted file mode 100644 index d0bdfe285020..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md +++ /dev/null @@ -1,1144 +0,0 @@ -Features and Improvements in ArangoDB 3.4 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.4. ArangoDB 3.4 also contains several bug fixes that are not listed -here. - -ArangoSearch ------------- - -ArangoSearch is a sophisticated, integrated full-text search solution over -a user-defined set of attributes and collections. It is the first type of -view in ArangoDB. - -- [ArangoSearch tutorial](https://www.arangodb.com/tutorials/arangosearch/) -- [ArangoSearch overview](../Views/ArangoSearch/README.md) -- [ArangoSearch in AQL](../../AQL/Views/ArangoSearch/index.html) - - -New geo index implementation ----------------------------- - -### S2 based geo index - -The geo index in ArangoDB has been reimplemented based on [S2 library](http://s2geometry.io/) -functionality. The new geo index allows indexing points, but also indexing of more -complex geographical objects. The new implementation is much faster than the previous one for -the RocksDB engine. - -Additionally, several AQL functions have been added to facilitate working with -geographical data: `GEO_POINT`, `GEO_MULTIPOINT`, `GEO_LINESTRING`, `GEO_MULTILINESTRING`, -`GEO_POLYGON` and `GEO_MULTIPOLYGON`. These functions will produce GeoJSON objects. - -Additionally there are new geo AQL functions `GEO_CONTAINS`, `GEO_INTERSECTS` and `GEO_EQUALS` -for querying and comparing GeoJSON objects. - -### AQL Editor GeoJSON Support - -As a feature on top, the web ui embedded AQL editor now supports also displaying all -GeoJSON supported data. 
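To illustrate how the new geo functions compose inside a query, here is a small sketch (the `locations` collection and its `longitude`/`latitude` attributes are invented for the example): it builds a polygon with `GEO_POLYGON` and keeps only the documents whose coordinates lie inside it, using `GEO_CONTAINS` and `GEO_POINT`:

```
LET area = GEO_POLYGON([
  [ 6.0, 50.0 ], [ 7.0, 50.0 ], [ 7.0, 51.0 ], [ 6.0, 51.0 ], [ 6.0, 50.0 ]
])
FOR loc IN locations
  FILTER GEO_CONTAINS(area, GEO_POINT(loc.longitude, loc.latitude))
  RETURN loc
```

Note that GeoJSON uses a longitude/latitude coordinate order.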
- - -RocksDB storage engine ----------------------- - -### RocksDB as default storage engine - -The default storage engine in ArangoDB 3.4 is now the RocksDB engine. - -Previous versions of ArangoDB used MMFiles as the default storage engine. This -change will have an effect for new ArangoDB installations only, and only if no -storage engine is selected explicitly or the storage engine selected is "auto". -In this case, a new installation will default to the RocksDB storage engine. - -Existing ArangoDB installations upgraded to 3.4 from previous versions will -continue to use their previously selected storage engine. - -### Optimized binary storage format - -The RocksDB storage engine in ArangoDB 3.4 now also uses an optimized binary -format for storing documents. This format allows inserting new documents in -an order that RocksDB prefers. Using the new format will reduce the number -of compactions that RocksDB needs to do for the ArangoDB documents stored, -allowing for better long-term insertion performance. - -The new binary format will **only be used for new installations** that start with -ArangoDB 3.4. Existing installations upgraded from previous versions will -continue to use the previous binary format. - -Note that there is no need to use the new binary format for installations upgraded -from 3.3, as the old binary format will continue to work as before. -In order to use the new binary format with existing data, it is required to -create a logical dump of the database data, shut down the server, erase the -database directory and restore the data from the logical dump. To minimize -downtime you can alternatively run a second arangod instance in your system, -that replicates the original data; once the replication has reached completion, -you can switch the instances. - -### Better control of RocksDB WAL sync interval - -ArangoDB 3.4 also provides a new configuration option `--rocksdb.sync-interval` -to control how frequently ArangoDB will automatically synchronize data in RocksDB's -write-ahead log (WAL) files to disk. Automatic syncs will only be performed for -not-yet synchronized data, and only for operations that have been executed without -the *waitForSync* attribute. - -Automatic synchronization of RocksDB WAL file data is performed by a background -thread in ArangoDB. The default sync interval is 100 milliseconds. This can be -adjusted so syncs happen more or less frequently. - -### Reduced replication catch-up time - -The catch-up time for comparing the contents of two collections (or shards) on two -different hosts via the incremental replication protocol has been reduced when using -the RocksDB storage engine. - -### Improved RocksDB geo index performance - -The rewritten geo index implementation 3.4 speeds up the RocksDB-based geo index -functionality by a factor of 3 to 6 for many common cases when compared to the -RocksDB-based geo index in 3.3. - -A notable implementation detail of previous versions of ArangoDB was that accessing -a RocksDB collection with a geo index acquired a collection-level lock. This severely -limited concurrent access to RocksDB collections with geo indexes in previous -versions. This requirement is now gone and no extra locks need to be acquired when -accessing a RocksDB collection with a geo index. - -### Optional caching for documents and primary index values - -The RocksDB engine now provides a new per-collection property `cacheEnabled` which -enables in-memory caching of documents and primary index entries. 
This can potentially
speed up point-lookups significantly, especially if collections have a subset of frequently
accessed documents.

The option can be enabled for a collection as follows:
```
db.<collection>.properties({ cacheEnabled: true });
```

If the cache is enabled, it will be consulted when reading documents and primary index
entries for the collection. If there is a cache miss and the document or primary index
entry has to be looked up from the RocksDB storage engine, the cache will be populated.

The per-collection cache utilization for primary index entries can be checked via the
command `db.<collection>.indexes(true)`, which will provide the attributes `cacheInUse`,
`cacheSize` and `cacheLifeTimeHitRate`.

Memory for the documents and primary index entries cache will be provided by ArangoDB's
central cache facility, whose maximum size can be configured by adjusting the value of
the startup option `--cache.size`.

Please note that caching may adversely affect the performance for collections that are
frequently updated. This is because cache entries need to be invalidated whenever documents
in the collection are updated, replaced or removed. Additionally, enabling caching will
subtract memory from the overall cache, so that less RAM may be available for other
items that use in-memory caching (e.g. edge index entries). It is therefore recommended
to turn on caching only for dedicated collections for which the caching effects have been
confirmed to be positive.

### Exclusive collection access option

In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
locks. This is good in general because it allows concurrent access to a RocksDB
collection.

Reading documents does not require any locks with the RocksDB engine, and writing documents
will acquire per-document locks. This means that different documents can be modified
concurrently by different transactions.

When concurrent transactions modify the same documents in a RocksDB collection, there
will be a write-write conflict, and one of the transactions will be aborted. This is
incompatible with the MMFiles engine, in which write-write conflicts are impossible due
to its collection-level locks. In the MMFiles engine, a write transaction always has
exclusive access to a collection, and locks out all other writers.

While making access to a collection exclusive is almost always undesired from the
throughput perspective, it can greatly simplify client application development. Therefore
the RocksDB engine now provides optional exclusive access to collections on a
per-query/per-transaction basis.

For AQL queries, all data-modification operations now support the `exclusive` option, e.g.

    FOR doc IN collection
      UPDATE doc WITH { updated: true } IN collection OPTIONS { exclusive: true }

JavaScript-based transactions can specify which collections to lock exclusively in the
`exclusive` sub-attribute of their `collections` attribute:

```js
db._executeTransaction({
  collections: {
    exclusive: [ "collection" ]
  },
  ...
});
```

Note that using exclusive access for RocksDB collections will serialize write operations
to RocksDB collections, so it should be used with extreme care.


### RocksDB library upgrade

The version of the bundled RocksDB library was upgraded from 5.6 to 5.16.

The version of the bundled Snappy compression library used by RocksDB was upgraded from
1.1.3 to 1.1.7.
- - -Collection and document operations ----------------------------------- - -### Repsert operation - -The existing functionality for inserting documents got an extra option to turn -an insert into a replace, in case that a document with the specified `_key` value -already exists. This type of operation is called a "Repsert" (Replace-insert). - -Using the new option client applications do not need to check first whether a -given document exists, but can use a single atomic operation to conditionally insert -or replace it. - -Here is an example of control flow that was previously necessary to conditionally -insert or replace a document: - -```js -doc = { _key: "someKey", value1: 123, value2: "abc" }; - -// check if the document already exists... -if (!db.collection.exists(doc._key)) { - // ... document did not exist, so insert it - db.collection.insert(doc); -} else { - // ... document did exist, so replace it - db.collection.replace(doc._key, doc); -} -``` - -With ArangoDB 3.4 this can now be simplified to: - -```js -doc = { _key: "someKey", value1: 123, value2: "abc" }; - -// insert the document if it does not exist yet, other replace -db.collection.insert(doc, { overwrite: true }); -``` - -Client applications can also optionally retrieve the old revision of the document -in case the insert turned into a replace operation: - -```js -doc = { _key: "someKey", value1: 123, value2: "abc" }; - -// insert the document if it does not exist yet, other replace -// in case of a replace, previous will be populated, in case of an -// insert, previous will be undefined -previous = db.collection.insert(doc, { overwrite: true, returnOld: true }).old; -``` - -The same functionality is available for the document insert method in the -HTTP REST API. The HTTP endpoint for `POST /_api/document` will now accept the -optional URL parameters `overwrite` and `returnOld`. - -AQL also supports making an INSERT a conditional REPSERT. In contrast to regular -INSERT it supports returning the OLD and the NEW document on disk to i.e. inspect -the revision or the previous content of the document. -AQL INSERT is switched to REPSERT by setting the option `overwrite` for it: - -``` -INSERT { - _key: "someKey", - value1: 123, - value2: "abc" -} INTO collection OPTIONS { overwrite: true } -RETURN OLD -``` - -Please note that in a cluster setup the Repsert operation requires the collection -to be sharded by `_key`. - - -### Graph API extensions - -The REST APIs for modifying graphs at endpoint `/_api/gharial` now support returning -the old revision of vertices / edges after modifying them. The APIs also supports -returning the just-inserted vertex / edge. This is in line with the already existing -single-document functionality provided at endpoint `/_api/document`. - -The old/new revisions can be accessed by passing the URL parameters `returnOld` and -`returnNew` to the following endpoints: - -* /_api/gharial/<graph>/vertex/<collection> -* /_api/gharial/<graph>/edge/<collection> - -The exception from this is that the HTTP DELETE verb for these APIs does not -support `returnOld` because that would make the existing API incompatible. - -### Additional key generators - -In addition to the existing key generators `traditional` (which is still the -default key generator) and `autoincrement`, ArangoDB 3.4 adds the following key -generators: - -* `padded`: - The `padded` key generator generates keys of a fixed length (16 bytes) in - ascending lexicographical sort order. 
This is ideal for usage with the RocksDB - engine, which will slightly benefit keys that are inserted in lexicographically - ascending order. The key generator can be used in a single-server or cluster. - -* `uuid`: the `uuid` key generator generates universally unique 128 bit keys, which - are stored in hexadecimal human-readable format. This key generator can be used - in a single-server or cluster to generate "seemingly random" keys. The keys - produced by this key generator are not lexicographically sorted. - -Generators may be chosen with the creation of collections; here an example for -the *padded* key generator: -``` -db._create("padded", { keyOptions: { type: "padded" } }); - -db.padded.insert({}); -{ - "_id" : "padded/0000000009d0d1c0", - "_key" : "0000000009d0d1c0", - "_rev" : "_XI6VqNK--_" -} - -db.padded.insert({}); -{ - "_id" : "padded/0000000009d0d1c4", - "_key" : "0000000009d0d1c4", - "_rev" : "_XI6VquC--_" -} -``` - -Example for the *uuid* key generator: -```js -db._create("uuid", { keyOptions: { type: "uuid" } }); - -db.uuid.insert({}); - -{ - "_id" : "uuid/16d5dc96-79d6-4803-b547-5a34ce795099", - "_key" : "16d5dc96-79d6-4803-b547-5a34ce795099", - "_rev" : "_XI6VPc2--_" -} - -db.uuid.insert({}); -{ - "_id" : "uuid/0af83d4a-56d4-4553-a97d-c7ed2644dc09", - "_key" : "0af83d4a-56d4-4553-a97d-c7ed2644dc09", - "_rev" : "_XI6VQgO--_" -} -``` - -### Miscellaneous improvements - -The command `db..indexes()` was added as an alias for the already existing -`db..getIndexes()` method for retrieving all indexes of a collection. The -alias name is more consistent with the already existing method names for retrieving -all databases and collections. - - -Cluster improvements --------------------- - -### Load-balancer support - -ArangoDB now supports running multiple coordinators behind a load balancer that -randomly routes client requests to the different coordinators. It is not required -anymore that load balancers implement session or connection stickiness on behalf -of ArangoDB. - -In particular, the following ArangoDB APIs were extended to work well with load -balancing: - -* the cursor API at endpoint `/_api/cursor` -* the jobs API at endpoint `/_api/job` -* the tasks API at endpoint `/_api/tasks` -* Pregel APIs at endpoint `/_api/pregel` - -Some of these APIs build up coordinator-local state in memory when being first -accessed, and allow accessing further data using follow-up requests. This caused -problems in previous versions of ArangoDB, when load balancers routed the follow -up requests to these APIs to different coordinators that did not have access to -the other coordinator's in-memory state. - -With ArangoDB 3.4, if such an API is accessed by a follow-up request that refers -to state being created on a different coordinator, the actually accessed coordinator -will forward the client request to the correct coordinator. Client applications -and load balancers do not need to be aware of which coordinator they had used -for the previous requests, though from a performance point of view accessing the -same coordinator for a sequence of requests will still be beneficial. - -If a coordinator forwards a request to a different coordinator, it will send the -client an extra HTTP header `x-arango-request-forwarded-to` with the id of the -coordinator it forwarded the request to. Client applications or load balancers -can optionally use that information to make follow-up requests to the "correct" -coordinator to save the forwarding. 
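As a sketch of what this looks like on the wire, a response that was transparently forwarded might carry the extra header (the value shown is only a placeholder for the handling coordinator's id):

```
HTTP/1.1 200 OK
x-arango-request-forwarded-to: <id of the coordinator that handled the request>
...
```

A client or load balancer that wants to avoid the extra hop can direct subsequent requests for the same cursor or job to that coordinator.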
- -### Refusal to start mixed-engine clusters - -Starting a cluster with coordinators and DB servers using different storage -engines is not supported. Doing it anyway will now log an error and abort a -coordinator's startup. - -Previous versions of ArangoDB did not detect the usage of different storage -engines in a cluster, but the runtime behavior of the cluster was undefined. - -### Advertised endpoints - -It is now possible to configure the endpoints advertised by the -coordinators to clients to be different from the endpoints which are -used for cluster internal communication. This is important for client -drivers which refresh the list of endpoints during the lifetime of the -cluster (which they should do!). In this way one can make the cluster -advertise a load balancer or a separate set of IP addresses for external -access. The new option is called `--cluster.my-advertised-endpoint`. - -### Startup safety checks - -The new option `--cluster.require-persisted-id` can be used to prevent the startup -of a cluster node using the wrong data directory. - -If the option is set to true, then the ArangoDB instance will only start if a -UUID file (containing the instance's cluster-wide ID) is found in the database -directory on startup. Setting this option will make sure the instance is started -using an already existing database directory and not a new one. - -For the first start, the UUID file must either be created manually or the option -must be set to `false` for the initial startup and later be changed to `true`. - -### Coordinator storage engine - -In previous versions of ArangoDB, cluster coordinator nodes used the storage -engine selected by the database administrator (i.e. MMFiles or RocksDB). -Although all database and document data was forwarded from coordinators to be -stored on the database servers and not on the coordinator nodes, the storage -engine used on the coordinator was checking and initializing its on-disk state -on startup. -Especially because no "real" data was stored by the coordinator's storage engine, -using a storage engine here did not provide any value but only introduced -unnecessary potential points of failure. - -As of ArangoDB 3.4, cluster coordinator nodes will now use an internal "cluster" -storage engine, which actually does not store any data. That prevents 3.4 -coordinators from creating any files or directories inside the database directory -except the meta data files such as `ENGINE`, `LOCK`, `SERVER`, `UUID` and `VERSION`. -And as no files need to be read on coordinator startup except these mentioned -files, it also reduces the possibility of data corruption on coordinator nodes. - -### `DBSERVER` role as alias of `PRIMARY` - -When starting a _DBServer_, the value `DBSERVER` can now be specified (as alias of -`PRIMARY`) in the option `--cluster.my-role`. The value `PRIMARY` is still accepted. - -All REST APIs that currently return "PRIMARY" as _role_, will continue to return -"PRIMARY". - - -AQL ---- - -### AQL query profiling - -AQL queries can now be executed with optional profiling, using ArangoDB 3.4's new -`db._queryProfile()` function. 
- -This new function is a hybrid of the already existing `db._query()` and `db._explain()` -functions: - -* `db._query()` will execute an AQL query, but not show the execution plan nor - runtime profile information -* `db._explain()` will show the query's execution plan, but not execute the query -* `db._queryProfile()` will run the query, collect the runtime costs of each component - of the query, and finally show the query's execution plan with actual runtime information. - This is very useful for debugging AQL query performance and optimizing queries. - -For more information please refer to the [Query Profiling](../../AQL/ExecutionAndPerformance/QueryProfiler.html) -page. - -### Revised cluster-internal AQL protocol - -When running an AQL query in a cluster, the coordinator has to distribute the -individual parts of the AQL query to the relevant shards that will participate -in the execution of the query. - -Up to including ArangoDB 3.3, the coordinator has deployed the query parts to the -individual shards one by one. The more shards were involved in a query, the more -cluster-internal requests this required, and the longer the setup took. - -In ArangoDB 3.4 the coordinator will now only send a single request to each of -the involved database servers (in contrast to one request per shard involved). -This will speed up the setup phase of most AQL queries, which will be noticable for -queries that affect a lot of shards. - -The AQL setup has been changed from a two-step protocol to a single-step protocol, -which additionally reduces the total number of cluster-internal requests necessary -for running an AQL query. - -The internal protocol and APIs have been adjusted so that AQL queries can now get -away with less cluster-internal requests than in 3.3 also after the setup phase. - -Finally, there is now an extra optimization for trivial AQL queries that will only -access a single document by its primary key (see below). - -### AQL functions added - -The following AQL functions have been added in ArangoDB 3.4: - -* `TO_BASE64`: creates the base64-encoded representation of a value -* `TO_HEX`: creates a hex-encoded string representation of a value -* `ENCODE_URI_COMPONENT`: URI-encodes a string value, for later usage in URLs -* `SOUNDEX`: calculates the soundex fingerprint of a string value -* `ASSERT`: aborts a query if a condition is not met -* `WARN`: makes a query produce a warning if a condition is not met -* `IS_KEY`: this function checks if the value passed to it can be used as a document - key, i.e. as the value of the `_key` attribute for a document -* `SORTED`: will return a sorted version of the input array using AQL's internal - comparison order -* `SORTED_UNIQUE`: same as `SORTED`, but additionally removes duplicates -* `COUNT_DISTINCT`: counts the number of distinct / unique items in an array -* `LEVENSHTEIN_DISTANCE`: calculates the Levenshtein distance between two string values -* `REGEX_MATCHES`: finds matches in a string using a regular expression -* `REGEX_SPLIT`: splits a string using a regular expression -* `UUID`: generates a universally unique identifier value -* `TOKENS`: splits a string into tokens using a language-specific text analyzer -* `VERSION`: returns the server version as a string - -The following AQL functions have been added to make working with geographical -data easier: - -* `GEO_POINT` -* `GEO_MULTIPOINT` -* `GEO_POLYGON` -* `GEO_LINESTRING` -* `GEO_MULTILINESTRING` -* `GEO_CONTAINS` -* `GEO_INTERSECTS` -* `GEO_EQUALS`. 
- -The first five functions will produce GeoJSON objects from coordinate data. The -latter three functions can be used for querying and comparing GeoJSON objects. - -The following AQL functions can now be used as aggregation functions in a -COLLECT statement: - -* `UNIQUE` -* `SORTED_UNIQUE` -* `COUNT_DISTINCT` - -The following function aliases have been created for existing AQL functions: - -* `CONTAINS_ARRAY` is an alias for `POSITION` -* `KEYS` is an alias for `ATTRIBUTES` - -### Distributed COLLECT - -In the general case, AQL COLLECT operations are expensive to execute in a cluster, -because the database servers need to send all shard-local data to the coordinator -for a centralized aggregation. - -The AQL query optimizer can push some parts of certain COLLECT operations to the -database servers so they can do a per-shard aggregation. The database servers can -then send only the already aggregated results to the coordinator for a final aggregation. -For several queries this will reduce the amount of data that has to be transferred -between the database servers servers and the coordinator by a great extent, and thus -will speed up these queries. Work on this has started with ArangoDB 3.3.5, but -ArangoDB 3.4 allows more cases in which COLLECT operations can partially be pushed to -the database servers. - -In ArangoDB 3.3, the following aggregation functions could make use of a distributed -COLLECT in addition to `COLLECT WITH COUNT INTO` and `RETURN DISTINCT`: - -* `COUNT` -* `SUM` -* `MIN` -* `MAX` - -ArangoDB 3.4 additionally enables distributed COLLECT queries that use the following -aggregation functions: - -* `AVERAGE` -* `VARIANCE` -* `VARIANCE_SAMPLE` -* `STDDEV` -* `STDDEV_SAMPLE` - -### Native AQL function implementations - -All built-in AQL functions now have a native implementation in C++. -Previous versions of ArangoDB had AQL function implementations in both C++ and -in JavaScript. - -The JavaScript implementations of AQL functions were powered by the V8 JavaScript -engine, which first required the conversion of all function input into V8's own -data structures, and a later conversion of the function result data into ArangoDB's -native format. - -As all AQL functions are now exclusively implemented in native C++, no more -conversions have to be performed to invoke any of the built-in AQL functions. -This will considerably speed up the following AQL functions and any AQL expression -that uses any of these functions: - -* `APPLY` -* `CALL` -* `CURRENT_USER` -* `DATE_ADD` -* `DATE_COMPARE` -* `DATE_DAYOFWEEK` -* `DATE_DAYOFYEAR` -* `DATE_DAYS_IN_MONTH` -* `DATE_DAY` -* `DATE_DIFF` -* `DATE_FORMAT` -* `DATE_HOUR` -* `DATE_ISO8601` -* `DATE_ISOWEEK` -* `DATE_LEAPYEAR` -* `DATE_MILLISECOND` -* `DATE_MINUTE` -* `DATE_MONTH` -* `DATE_NOW` -* `DATE_QUARTER` -* `DATE_SECOND` -* `DATE_SUBTRACT` -* `DATE_TIMESTAMP` -* `DATE_YEAR` -* `IS_DATESTRING` -* `IS_IN_POLYGON` -* `LTRIM` -* `RTRIM` -* `FIND_FIRST` -* `FIND_LAST` -* `REVERSE` -* `SPLIT` -* `SUBSTITUTE` -* `SHA512` -* `TRANSLATE` -* `WITHIN_RECTANGLE` - -Additionally, the AQL functions `FULLTEXT`, `NEAR` and `WITHIN` now use the native -implementations even when executed in a cluster. In previous versions of ArangoDB, -these functions had native implementations for single-server setups only, but fell -back to using the JavaScript variants in a cluster environment. 
- -Apart from saving conversion overhead, another side effect of adding native -implementations for all built-in AQL functions is, that AQL does not require the usage -of V8 anymore, except for user-defined functions. - -If no user-defined functions are used in AQL, end users do not need to put aside -dedicated V8 contexts for executing AQL queries with ArangoDB 3.4, making server -configuration less complex and easier to understand. - -### AQL optimizer query planning improvements - -The AQL query optimizer will by default now create at most 128 different execution -plans per AQL query. In previous versions the maximum number of plans was 192. - -Normally the AQL query optimizer will generate a single execution plan per AQL query, -but there are some cases in which it creates multiple competing plans. More plans -can lead to better optimized queries, however, plan creation has its costs. The -more plans are created and shipped through the optimization pipeline, the more -time will be spent in the optimizer. -To make the optimizer better cope with some edge cases, the maximum number of plans -created is now strictly enforced and was lowered compared to previous versions of -ArangoDB. This helps a specific class of complex queries. - -Note that the default maximum value can be adjusted globally by setting the startup -option `--query.optimizer-max-plans` or on a per-query basis by setting a query's -`maxNumberOfPlans` option. - -### Condition simplification - -The query optimizer rule `simplify-conditions` has been added to simplify certain -expressions inside CalculationNodes, which can speed up runtime evaluation of these -expressions. - -The optimizer rule `fuse-filters` has been added to merge adjacent FILTER conditions -into a single FILTER condition where possible, allowing to save some runtime registers. - -### Single document optimizations - -In a cluster, the cost of setting up a distributed query can be considerable for -trivial AQL queries that will only access a single document, e.g. - - FOR doc IN collection FILTER doc._key == ... RETURN doc - FOR doc IN collection FILTER doc._key == ... RETURN 1 - - FOR doc IN collection FILTER doc._key == ... REMOVE doc IN collection - FOR doc IN collection FILTER doc._key == ... REMOVE doc._key IN collection - REMOVE... IN collection - - FOR doc IN collection FILTER doc._key == ... UPDATE doc WITH { ... } IN collection - FOR doc IN collection FILTER doc._key == ... UPDATE doc._key WITH { ... } IN collection - UPDATE ... WITH { ... } IN collection - - FOR doc IN collection FILTER doc._key == ... REPLACE doc WITH { ... } IN collection - FOR doc IN collection FILTER doc._key == ... REPLACE doc._key WITH { ... } IN collection - REPLACE ... WITH { ... } IN collection - - INSERT { ... } INTO collection - -All of the above queries will affect at most a single document, identified by its -primary key. The AQL query optimizer can now detect this, and use a specialized -code path for directly carrying out the operation on the participating database -server(s). This special code path bypasses the general AQL query cluster setup and -shutdown, which would have prohibitive costs for these kinds of queries. - -In case the optimizer makes use of the special code path, the explain output will -contain a node of the type `SingleRemoteOperationNode`, and the optimizer rules -will contain `optimize-cluster-single-document-operations`. - -The optimization will fire automatically only for queries with the above patterns. 
-It will only fire when using `_key` to identify a single document, -and will be most effective if `_key` is also used as the collection's shard key. - -### Subquery optimizations - -The AQL query optimizer can now optimize certain subqueries automatically so that -they perform less work. - -The new optimizer rule `optimize-subqueries` will fire in the following situations: - -* in case only a few results are used from a non-modifying subquery, the rule will - automatically add a LIMIT statement into the subquery. - - For example, the unbounded subquery - - LET docs = ( - FOR doc IN collection - FILTER ... - RETURN doc - ) - RETURN docs[0] - - will be turned into a subquery that only produces a single result value: - - LET docs = ( - FOR doc IN collection - FILTER ... - LIMIT 1 - RETURN doc - ) - RETURN docs[0] - -* in case the result returned by a subquery is not used later but only the number - of subquery results, the optimizer will modify the result value of the subquery - so that it will return constant values instead of potentially more expensive - data structures. - - For example, the following subquery returning entire documents - - RETURN LENGTH( - FOR doc IN collection - FILTER ... - RETURN doc - ) - - will be turned into a subquery that returns only simple boolean values: - - RETURN LENGTH( - FOR doc IN collection - FILTER ... - RETURN true - ) - - This saves fetching the document data from disk in first place, and copying it - from the subquery to the outer scope. - There may be more follow-up optimizations. - -### COLLECT INTO ... KEEP optimization - -When using an AQL COLLECT ... INTO without a *KEEP* clause, then the AQL query -optimizer will now automatically detect which sub-attributes of the *INTO* variables -are used later in the query. The optimizer will add automatic *KEEP* clauses to -the COLLECT statement then if possible. - -For example, the query - - FOR doc1 IN collection1 - FOR doc2 IN collection2 - COLLECT x = doc1.x INTO g - RETURN { x, all: g[*].doc1.y } - -will automatically be turned into - - FOR doc1 IN collection1 - FOR doc2 IN collection2 - COLLECT x = doc1.x INTO g KEEP doc1 - RETURN { x, all: g[*].doc1.y } - -This prevents variable `doc2` from being temporarily stored in the variable `g`, -which saves processing time and memory, especially for big result sets. - -### Fullcount changes - -The behavior of the `fullCount` option for AQL query cursors was adjusted to conform -to users' demands. The value returned in the `fullCount` result attribute will now -be produced only by the last `LIMIT` statement on the upper most level of the query - -hence `LIMIT` statements in subqueries will not have any effect on the -`fullCount` results any more. - -This is a change to previous versions of ArangoDB, in which the `fullCount` -value was produced by the sequential last `LIMIT` statement in a query, -regardless if the `LIMIT` was on the top level of the query or in a subquery. - -The `fullCount` result value will now also be returned for queries that are served -from the query results cache. - -### Relaxed restrictions for LIMIT values - -The `offset` and `count` values used in an AQL LIMIT clause can now be expressions, as -long as the expressions can be resolved at query compile time. -For example, the following query will now work: - - FOR doc IN collection - LIMIT 0, CEIL(@percent * @count / 100) - RETURN doc - -Previous versions of ArangoDB required the `offset` and `count` values to be -either number literals or numeric bind parameter values. 
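For completeness, here is a sketch of how such a query could be issued from arangosh, with the numeric values supplied as bind parameters at runtime (the collection name and values are illustrative):

```js
var result = db._query(
  "FOR doc IN collection LIMIT 0, CEIL(@percent * @count / 100) RETURN doc",
  { percent: 10, count: 500 }
).toArray();
```

Because bind parameter values are known when the query is compiled, the `CEIL(...)` expression can be resolved to a constant limit before execution.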

### Improved sparse index support

The AQL query optimizer can now use sparse indexes in more cases than it was able to
in ArangoDB 3.3. If a sparse index is not used in a query because the query optimizer
cannot prove by itself that the index attribute value can never be `null`, it is now often
useful to add an extra filter condition to the query that requires the sparse index's
attribute to be non-null.

For example, if for the following query there is a sparse index on `value` in any
of the collections, the optimizer cannot prove that `value` can never be `null`:

    FOR doc1 IN collection1
    FOR doc2 IN collection2
    FILTER doc1.value == doc2.value
    RETURN [doc1, doc2]

By adding an extra filter condition to the query that excludes `null` values explicitly,
the optimizer in 3.4 will now be able to use a sparse index on `value`:

    FOR doc1 IN collection1
    FOR doc2 IN collection2
    FILTER doc1.value == doc2.value
    FILTER doc2.value != null
    RETURN [doc1, doc2]

The optimizer in 3.3 was not able to detect this, and refused to use sparse indexes
for such queries.

### Query results cache

The AQL query results cache in ArangoDB 3.4 has got additional parameters to
control which queries should be stored in the cache.

In addition to the already existing configuration option `--query.cache-entries`
that controls the maximum number of query results cached in each database's
query results cache, there now exist the following extra options:

- `--query.cache-entries-max-size`: maximum cumulated size of the results stored
  in each database's query results cache
- `--query.cache-entry-max-size`: maximum size for an individual cache result
- `--query.cache-include-system-collections`: whether or not results of queries
  that involve system collections should be stored in the query results cache

These options allow more effective control of the amount of memory used by the
query results cache, and can be used to better utilize the cache memory.

The cache configuration can be changed at runtime using the `properties` function
of the cache. For example, to limit the per-database number of cache entries to
256, the per-database cumulated size of query results to 64 MB, and the maximum
size of each individual cache entry to 1 MB, the following call could be used:

```
require("@arangodb/aql/cache").properties({
  maxResults: 256,
  maxResultsSize: 64 * 1024 * 1024,
  maxEntrySize: 1024 * 1024,
  includeSystem: false
});
```

The contents of the query results cache can now also be inspected at runtime using
the cache's new `toArray` function:

```
require("@arangodb/aql/cache").toArray();
```

This will show all query results currently stored in the query results cache of
the current database, along with their query strings, sizes, number of results
and original query run times.

The functionality is also available via HTTP REST APIs.


### Miscellaneous changes

When creating query execution plans for a query, the query optimizer was fetching
the number of documents of the underlying collections in case multiple query
execution plans were generated. The optimizer used these counts as part of its
internal decisions and execution plan cost calculations.

Fetching the number of documents of a collection can have measurable overhead in a
cluster, so ArangoDB 3.4 now caches the "number of documents" that are referred to
when creating query execution plans.
This may save a few roundtrips in case the
same collections are frequently accessed using AQL queries.

The "number of documents" value was not and is not supposed to be 100% accurate
at this stage, as it is used for rough cost estimates only. It is possible however
that when explaining an execution plan, the "number of documents" estimated for
a collection is using a cached stale value, and that the estimates change slightly
over time even if the underlying collection is not modified.


Streaming AQL Cursors
---------------------

AQL query cursors created by client applications traditionally executed an AQL query,
and built up the entire query result in memory. Once the query completed, the results
were sent back to the client application in chunks of configurable size.

This approach was a good fit for the MMFiles engine with its collection-level locks,
and usually smaller-than-RAM query results. For the RocksDB engine with its document-level
locks and lock-free reads and potentially huge query results, this approach does not always
fit.

ArangoDB 3.4 optionally allows executing AQL queries initiated via the cursor API in a
streaming fashion. The query result will then be calculated on the fly, and results are
sent back to the client application as soon as they become available on the server, even
if the query has not yet completed.

This is especially useful for queries that produce big result sets (e.g.
`FOR doc IN collection RETURN doc` for big collections). Such queries will take very long
to complete without streaming, because the entire query result will be computed first and
stored in memory. Executing such queries in non-streaming fashion may lead to client
applications timing out before receiving the first chunk of data from the server. Additionally,
creating a huge query result set on the server may make it run out of memory, which is also
undesired. Creating a streaming cursor for such queries will solve both problems.

Please note that streaming cursors will keep using resources until you fetch the last
chunk of results.

Depending on the storage engine used, this has different consequences:

- **MMFiles**: Previously, collection locks were only held during the creation of the cursor
  (the first request), i.e. until the result set was fully prepared. They will now be held
  until the last chunk requested by the client through the cursor has been processed.

  While multiple reads are possible, one write operation will effectively stop
  all other actions from happening on the collections in question.
- **RocksDB**: Reading occurs on the state of the data when the query
  was started. Writing however will happen while working with the cursor.
  Thus be prepared for possible conflicts if there are other writes on the collections,
  and consider overruling them with `ignoreErrors: true`, otherwise the query
  will abort when the conflict happens.

Taking into account the above consequences, streaming cursors should not be used
carelessly for data-modification queries.

Please note that the query options `cache`, `count` and `fullCount` will not work with streaming
cursors. Additionally, the query statistics, warnings and profiling data will only be available
when the last result batch for the query is sent. Using a streaming cursor will also prevent
the query results from being stored in the AQL query results cache.
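
As a rough sketch (the collection name and batch size are placeholders), a streaming cursor
can be requested from arangosh through the cursor API like this:

```js
// Sketch: request a streaming cursor via the HTTP cursor API.
// The `stream` attribute enables streaming; `batchSize` controls the chunk size.
var body = {
  query: "FOR doc IN collection RETURN doc",
  batchSize: 1000,
  stream: true
};

// `arango` is the server connection object available in arangosh
var cursor = arango.POST("/_api/cursor", JSON.stringify(body));

// fetch follow-up batches until the cursor is exhausted
while (cursor.hasMore) {
  // process cursor.result (the current batch) here
  cursor = arango.PUT("/_api/cursor/" + cursor.id, "");
}
```
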

By default, query cursors created via the cursor API are non-streaming in ArangoDB 3.4,
but streaming can be enabled on a per-query basis by setting the `stream` attribute
in the request to the cursor API at endpoint `/_api/cursor`.

However, streaming cursors are enabled automatically for the following parts of ArangoDB in 3.4:

* when exporting data from collections using the arangoexport binary
* when using `db.<collection>.toArray()` from the Arango shell

Please note that AQL queries consumed in a streaming fashion have their own, adjustable
"slow query" threshold. That means the "slow query" threshold can be configured separately for
regular queries and streaming queries.


Native implementations
----------------------

The following internal and user-facing functionality has been ported from
JavaScript-based implementations to C++-based implementations in ArangoDB 3.4:

* the statistics gathering background thread
* the REST APIs for
  - managing user-defined AQL functions
  - graph management at `/_api/gharial`, including
    - vertex management
    - edge management
* the implementations of all built-in AQL functions
* all other parts of AQL except user-defined functions
* database creation and setup
* all the DBserver-internal maintenance tasks for shard creation, index
  creation and the like in the cluster

By making the listed functionality not use and not depend on the V8 JavaScript
engine, the respective functionality can now be invoked more efficiently in the
server, without requiring the conversion of data between ArangoDB's native format
and V8's internal formats. For the maintenance operations this will lead to
improved stability in the cluster.

As a consequence, ArangoDB agency and database server nodes in an ArangoDB 3.4
cluster will now turn off the V8 JavaScript engine at startup entirely and automatically.
The V8 engine will still be enabled on cluster coordinators, single servers and
active failover instances. But even the latter instance types will not require as
many V8 contexts as previous versions of ArangoDB.
This should reduce problems with servers running out of available V8 contexts or
using a lot of memory just for keeping V8 contexts around.


Foxx
----

The functions `uuidv4` and `genRandomBytes` have been added to the `crypto` module.

The functions `hexSlice` and `hexWrite` have been added to the `Buffer` object.

The functions `Buffer.from`, `Buffer.of`, `Buffer.alloc` and `Buffer.allocUnsafe`
have been added to the `Buffer` object for improved compatibility with node.js.


Security
--------

### Ownership for cursors, jobs and tasks

Cursors for AQL query results created by the API at endpoint `/_api/cursor`
are now tied to the user that first created the cursor.

Follow-up requests to consume or remove data of an already created cursor will
now be denied if attempted by a different user.

The same mechanism is also in place for the following APIs:

- jobs created via the endpoint `/_api/job`
- tasks created via the endpoint `/_api/tasks`


### Dropped support for SSLv2

ArangoDB 3.4 will not start when attempting to bind the server to a Secure Sockets
Layer (SSL) v2 endpoint. Additionally, the client tools (arangosh, arangoimport,
arangodump, arangorestore etc.) will refuse to connect to an SSLv2-enabled server.

SSLv2 can be considered unsafe nowadays and as such has been disabled in the OpenSSL
library by default in recent versions. ArangoDB is following this step.
- -Clients that use SSLv2 with ArangoDB should change the protocol from SSLv2 to TLSv12 -if possible, by adjusting the value of the `--ssl.protocol` startup option for the -`arangod` server and all client tools. - - -Distribution Packages ---------------------- - -In addition to the OS-specific packages (eg. _rpm_ for Red Hat / CentOS, _deb_ for -Debian, NSIS installer for Windows etc.) starting from 3.4.0 new `tar.gz` archive packages -are available for Linux and Mac. They correspond to the `.zip` packages for Windows, -which can be used for portable installations, and to easily run different ArangoDB -versions on the same machine (e.g. for testing). - - -Client tools ------------- - -### Arangosh - -Starting with ArangoDB version 3.4.5, the ArangoShell (arangosh) provides the option -`--console.history` for controlling whether the shell's command-line history -should be loaded from and persisted in a file. - -The default value for this option is `true`. Setting it to `false` -will make arangosh not load any command-line history from the history -file, and not store the current session's history when the shell is -exited. The command-line history will then only be available in the -current shell session. - -### Arangodump - -Arangodump can now dump multiple collections in parallel. This can significantly -reduce the time required to take a backup. - -By default, arangodump will use 2 threads for dumping collections. The number of -threads used by arangodump can be adjusted by using the `--threads` option when -invoking it. - -### Arangorestore - -Arangorestore can now restore multiple collections in parallel. This can significantly -reduce the time required to recover data from a backup. - -By default, arangorestore will use 2 threads for restoring collections. The number of -threads used by arangorestore can be adjusted by using the `--threads` option when -invoking it. - -### Arangoimport - -Arangoimp was renamed to arangoimport for consistency. -The 3.4 release packages will still install `arangoimp` as a symlink so user scripts -invoking `arangoimp` do not need to be changed. - -[Arangoimport now can pace the data load rate automatically](../Programs/Arangoimport/Details.md#automatic-pacing-with-busy-or-low-throughput-disk-subsystems) -based on the actual rate of -data the server can handle. This is useful in contexts when the server has a limited -I/O bandwidth, which is often the case in cloud environments. Loading data too quickly -may lead to the server exceeding its provisioned I/O operations quickly, which will -make the cloud environment throttle the disk performance and slowing it down drastically. -Using a controlled and adaptive import rate allows preventing this throttling. - -The pacing algorithm is turned on by default, but can be disabled by manually specifying -any value for the `--batch-size` parameter. - -Arangoimport also got an extra option `--create-database` so that it can automatically -create the target database should this be desired. Previous versions of arangoimp -provided options for creating the target collection only -(`--create-collection`, `--create-collection-type`). - -Finally, arangoimport got an option `--latency` which can be used to print microsecond -latency statistics on 10 second intervals for import runs. This can be used to get -additional information about the import run performance and performance development. 
- - -Miscellaneous features ----------------------- - -### Logging without escaping non-printable characters - -The new option `--log.escape` can be used to enable a slightly different log output -format. - -If set to `true` (which is the default value), then the logging will work as in -previous versions of ArangoDB, and the following characters in the log output are -escaped: - -* the carriage return character (hex 0d) -* the newline character (hex 0a) -* the tabstop character (hex 09) -* any other characters with an ordinal value less than hex 20 - -If the `--log.escape` option is set to `false` however, no characters are escaped -when logging them. Characters with an ordinal value less than hex 20 (including -carriage return, newline and tabstop) will not be printed in this mode, but will -be replaced with a space character (hex 20). This is because these characters are -often undesired in logs anyway. -Another positive side effect of turning off the escaping is that it will slightly -reduce the CPU overhead for logging. However, this will only be noticable when the -logging is set to a very verbose level (e.g. log levels debug or trace). - - -### Active Failover - -The _Active Failover_ mode is now officially supported for multiple slaves. - -Additionally you can now send read-only requests to followers, so you can -use them for read scaling. To make sure only requests that are intended for -this use-case are served by the follower you need to add a -`X-Arango-Allow-Dirty-Read: true` header to HTTP requests. - -For more information see -[Active Failover Architecture](../Architecture/DeploymentModes/ActiveFailover/Architecture.md). diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures35.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures35.md deleted file mode 100644 index ed478715796c..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures35.md +++ /dev/null @@ -1,530 +0,0 @@ -Features and Improvements in ArangoDB 3.5 -========================================= - -The following list shows in detail which features have been added or improved in -ArangoDB 3.5. ArangoDB 3.5 also contains several bug fixes that are not listed -here. - -AQL ---- - -### SORT-LIMIT optimization - -A new SORT-LIMIT optimization has been added. This optimization will be pulled off -by the query optimizer if there is a SORT statement followed by a LIMIT node, and the -overall number of documents to return is relatively small in relation to the total -number of documents to be sorted. In this case, the optimizer will use a size-constrained -heap for keeping only the required number of results in memory, which can drastically -reduce memory usage and, for some queries, also execution time for the sorting. - -If the optimization is applied, it will show as "sort-limit" rule in the query execution -plan. - -### Index hints in AQL - -Users may now take advantage of the `indexHint` inline query option to override -the internal optimizer decision regarding which index to use to serve content -from a given collection. The index hint works with the named indices feature -above, making it easy to specify which index to use. - -### Sorted primary index (RocksDB engine) - -The query optimizer can now make use of the sortedness of primary indexes if the -RocksDB engine is used. This means the primary index can be utilized for queries -that sort by either the `_key` or `_id` attributes of a collection and also for -range queries on these attributes. 
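
For example, queries like the following can now be served from the primary index when
using the RocksDB engine (a sketch; `users` is a placeholder collection name):

```js
// sorting by _key can use the sortedness of the RocksDB primary index
db._query("FOR doc IN users SORT doc._key RETURN doc._key");

// range queries on _key can be answered by the primary index as well
db._query("FOR doc IN users FILTER doc._key >= 'user-100' && doc._key < 'user-200' RETURN doc");
```
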
- -In the list of documents for a collection in the web interface, the documents will -now always be sorted in lexicographical order of their `_key` values. An exception for -keys representing quasi-numerical values has been removed when doing the sorting in -the web interface. Removing this exception can also speed up the display of the list -of documents. - -This change potentially affects the order in which documents are displayed in the -list of documents overview in the web interface. A document with a key value "10" will -now be displayed before a document with a key value of "9". In previous versions of -ArangoDB this was exactly opposite. - -### Edge index query optimization (RocksDB engine) - -An AQL query that uses the edge index only and returns the opposite side of -the edge can now be executed in a more optimized way, e.g. - - FOR edge IN edgeCollection FILTER edge._from == "v/1" RETURN edge._to - -is fully covered by the RocksDB edge index. - -For MMFiles this rule does not apply. - -### AQL syntax improvements - -AQL now allows the usage of floating point values without leading zeros, e.g. -`.1234`. Previous versions of ArangoDB required a leading zero in front of -the decimal separator, i.e `0.1234`. - -### k Shortest Paths queries - -AQL now allows to perform k Shortest Paths queries, that is, query a number of -paths of increasing length from a start vertex to a target vertex. For more details, -see the [k Shortest Paths documentation](../../AQL/Graphs/KShortestPaths.html). - - -Smart Joins ------------ - -The "smart joins" feature available in the ArangoDB Enterprise Edition allows running -joins between two sharded collections with performance close to that of a local join -operation. - -The prerequisite for this is that the two collections have an identical sharding setup, -established via the `distributeShardsLike` attribute of one of the collections. - -Quick example setup for two collections with identical sharding: - - > db._create("products", { numberOfShards: 3, shardKeys: ["_key"] }); - > db._create("orders", { distributeShardsLike: "products", shardKeys: ["productId"] }); - > db.orders.ensureIndex({ type: "hash", fields: ["productId"] }); - -Now an AQL query that joins the two collections via their shard keys will benefit from -the smart join optimization, e.g. - - FOR p IN products - FOR o IN orders - FILTER p._key == o.productId - RETURN o - -In this query's execution plan, the extra hop via the coordinator can be saved -that is normally there for generic joins. Thanks to the smart join optimization, -the query's execution is as simple as: - - Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 3 EnumerateCollectionNode DBS 9 - FOR o IN orders /* full collection scan, 3 shard(s) */ - 7 IndexNode DBS 0 - FOR p IN products /* primary index scan, scan only, 3 shard(s) */ - 10 RemoteNode COOR 0 - REMOTE - 11 GatherNode COOR 0 - GATHER - 6 ReturnNode COOR 0 - RETURN o - -Without the smart join optimization, there will be an extra hop via the -coordinator for shipping the data from each shard of the one collection to -each shard of the other collection, which will be a lot more expensive: - - Execution plan: - Id NodeType Site Est. 
Comment - 1 SingletonNode DBS 1 * ROOT - 16 IndexNode DBS 3 - FOR p IN products /* primary index scan, index only, projections: `_key`, 3 shard(s) */ - 14 RemoteNode COOR 3 - REMOTE - 15 GatherNode COOR 3 - GATHER - 8 ScatterNode COOR 3 - SCATTER - 9 RemoteNode DBS 3 - REMOTE - 7 IndexNode DBS 3 - FOR o IN orders /* hash index scan, 3 shard(s) */ - 10 RemoteNode COOR 3 - REMOTE - 11 GatherNode COOR 3 - GATHER - 6 ReturnNode COOR 3 - RETURN o - -In the end, smart joins can optimize away a lot of the inter-node network -requests normally required for performing a join between sharded collections. -The performance advantage of smart joins compared to regular joins will grow -with the number of shards of the underlying collections. - -In general, for two collections with `n` shards each, the minimal number of -network requests for the general join (_no_ smart joins optimization) will be -`n * (n + 2)`. The number of network requests increases quadratically with the -number of shards. - -Smart joins can get away with a minimal number of `n` requests here, which scales -linearly with the number of shards. - -Smart joins will also be especially advantageous for queries that have to ship a lot -of data around for performing the join, but that will filter out most of the data -after the join. In this case smart joins should greatly outperform the general join, -as they will eliminate most of the inter-node data shipping overhead. - -Also see the [Smart Joins](../SmartJoins.md) page. - - -Background Index Creation -------------------------- - -Creating new indexes is by default done under an exclusive collection lock. This means -that the collection (or the respective shards) are not available for write operations -as long as the index is created. This "foreground" index creation can be undesirable, -if you have to perform it on a live system without a dedicated maintenance window. - -Starting with ArangoDB 3.5, indexes can also be created in "background", not using an -exclusive lock during the entire index creation. The collection remains basically available, -so that other CRUD operations can run on the collection while the index is being created. -This can be achieved by setting the *inBackground* attribute when creating an index. - -To create an index in the background in *arangosh* just specify `inBackground: true`, -like in the following example: - -```js -db.collection.ensureIndex({ type: "hash", fields: [ "value" ], inBackground: true }); -``` - -Indexes that are still in the build process will not be visible via the ArangoDB APIs. -Nevertheless it is not possible to create the same index twice via the *ensureIndex* API -while an index is still begin created. AQL queries also will not use these indexes until -the index reports back as fully created. Note that the initial *ensureIndex* call or HTTP -request will still block until the index is completely ready. Existing single-threaded -client programs can thus safely set the *inBackground* option to *true* and continue to -work as before. - -Should you be building an index in the background you cannot rename or drop the collection. -These operations will block until the index creation is finished. This is equally the case -with foreground indexing. - -After an interrupted index build (i.e. due to a server crash) the partially built index -will the removed. In the ArangoDB cluster the index might then be automatically recreated -on affected shards. 

Background index creation might be slower than "foreground" index creation and require
more RAM. Under a write-heavy load (specifically many remove, update or replace operations),
the background index creation needs to keep a list of removed documents in RAM. This might
become unsustainable if this list grows to tens of millions of entries.

Building an index is always a write-heavy operation, so it is always a good idea to build
indexes during times with less load.

Please note that background index creation is useful only in combination with the RocksDB
storage engine. With the MMFiles storage engine, creating an index will always block any
other operations on the collection.


TTL (time-to-live) Indexes
--------------------------

The new TTL indexes feature provided by ArangoDB can be used for automatically
removing expired documents from a collection.

TTL indexes support eventual removal of documents which are past a configured
expiration timepoint. The expiration timepoints can be based upon the documents'
original insertion or last-updated timepoints, plus a retention period during
which to keep the documents.
Alternatively, expiration timepoints can be specified as absolute values per
document.
It is also possible to exclude documents from automatic expiration and removal.

Please also note that TTL indexes are designed exactly for the purpose of removing
expired documents from collections. It is *not recommended* to rely on TTL indexes
for user-land AQL queries. This is because TTL indexes internally may store a transformed,
always numerical version of the index attribute value, even if it was originally passed in
as a datestring. As a result, TTL indexes will likely not be used for filtering and sort
operations in user-land AQL queries.

Also see the [TTL Indexes](../Indexing/Ttl.md) page.


Collections
-----------

All collections now support a minimum replication factor (minReplicationFactor) property.
It defaults to `1`, which is identical to the previous behavior.
If, in a failover scenario, a shard of a collection has fewer than minReplicationFactor
in-sync followers, it will go into "read-only" mode and will reject writes until enough
followers are in sync again.
In more detail:
Having `minReplicationFactor == 1` means that writes are allowed as soon as a "master-copy"
of the data is available.
Having `minReplicationFactor > 1` requires additional in-sync copies on follower servers to allow writes.
The feature is used to reduce the divergence of data in case of server failures and to help
new followers to catch up.

HTTP API extensions
-------------------

### Extended index API

The HTTP API for creating indexes at POST `/_api/index` has been extended two-fold:

* to create a TTL (time-to-live) index, it is now possible to specify a value of `ttl`
  in the `type` attribute. When creating a TTL index, the attribute `expireAfter` is
  also required. That attribute contains the expiration time (in seconds), which is
  based on the documents' index attribute value.

* to create an index in the background, the attribute `inBackground` can be set to `true`.

### API for querying the responsible shard

The HTTP API for collections has got an additional route for retrieving the responsible
shard for a document at PUT `/_api/collection/<collection-name>/responsibleShard`.

When calling this route, the request body is supposed to contain the document for which
the responsible shard should be determined.
The response will contain an attribute `shardId` -containing the ID of the shard that is responsible for that document. - -A method `collection.getResponsibleShard(document)` was added to the JS API as well. - -It does not matter if the document actually exists or not, as the shard responsibility -is determined from the document's attribute values only. - -Please note that this API is only meaningful and available on a cluster coordinator. - -### Foxx API for running tests - -The HTTP API for running Foxx service tests now supports a `filter` attribute, -which can be used to limit which test cases should be executed. - - -### Stream Transaction API - -There is a new HTTP API for transactions. This API allows clients to add operations to a -transaction in a streaming fashion. A transaction can consist of a series of supported -transactional operations, followed by a commit or abort command. -This allows clients to construct larger transactions in a more efficent way than -with JavaScript-based transactions. - -Note that this requires client applications to abort transactions which are no -longer necessary. Otherwise resources and locks acquired by the transactions -will hang around until the server decides to garbage-collect them. - -### Minimal replication Factor - -Within the properties of a collection we can now define a minReplicationFactor. -This affects all routes that can create or modify the properties of a collection, including -the graph API `_api/gharial`. All places where a replicationFactor can be modified, can now -modify the minReplicationFactor as well. - -Web interface -------------- - -When using the RocksDB engine, the selection of index types "persistent" and "skiplist" -has been removed from the web interface when creating new indexes. - -The index types "hash", "skiplist" and "persistent" are just aliases of each other -when using the RocksDB engine, so there is no need to offer them all. - - -JavaScript ----------- - -### V8 updated - -The bundled version of the V8 JavaScript engine has been upgraded from 5.7.492.77 to -7.1.302.28. - -Among other things, the new version of V8 provides a native JavaScript `BigInt` type which -can be used to store arbitrary-precision integers. However, to store such `BigInt` objects -in ArangoDB, they need to be explicitly converted to either strings or simple JavaScript -numbers. -Converting BigInts to strings for storage is preferred because converting a BigInt to a -simple number may lead to precision loss. 
- -```js -// will fail with "bad parameter" error: -value = BigInt("123456789012345678901234567890"); -db.collection.insert({ value }); - -// will succeed: -db.collection.insert({ value: String(value) }); - -// will succeed, but lead to precision loss: -db.collection.insert({ value: Number(value) }); -``` - -The new V8 version also changes the default timezone of date strings to be conditional -on whether a time part is included: - -```js -> new Date("2019-04-01"); -Mon Apr 01 2019 02:00:00 GMT+0200 (Central European Summer Time) - -> new Date("2019-04-01T00:00:00"); -Mon Apr 01 2019 00:00:00 GMT+0200 (Central European Summer Time) -``` -If the timezone is explicitly set in the date string, then the specified timezone will -always be honored: - -```js -> new Date("2019-04-01Z"); -Mon Apr 01 2019 02:00:00 GMT+0200 (Central European Summer Time) -> new Date("2019-04-01T00:00:00Z"); -Mon Apr 01 2019 02:00:00 GMT+0200 (Central European Summer Time) -``` - -### JavaScript security options - -ArangoDB 3.5 provides several new options for restricting the functionality of -JavaScript application code running in the server, with the intent to make a setup -more secure. - -There now exist startup options for restricting which environment variables and -values of which configuration options JavaScript code is allowed to read. These -options can be set to prevent leaking of confidential information from the -environment or the setup into the JavaScript application code. -Additionally there are options to restrict outbound HTTP connections from JavaScript -applications to certain endpoints and to restrict filesystem access from JavaScript -applications to certain directories only. - -Finally there are startup options to turn off the REST APIs for managing Foxx -services, which can be used to prevent installation and uninstallation of Foxx -applications on a server. A separate option is provided to turn off access and -connections to the central Foxx app store via the web interface. - -A complete overview of the security options can be found in [Security Options](../Security/SecurityOptions.md). - -### Foxx - -Request credentials are now exposed via the `auth` property: - -```js -const tokens = context.collection("tokens"); -router.get("/authorized", (req, res) => { - if (!req.auth || !req.auth.bearer || !tokens.exists(req.auth.bearer)) { - res.throw(403, "Not authenticated"); - } - // ... -}); -``` - -### API improvements - -Collections now provide the `documentId` method to derive document ids from keys. - -Before: - -```js -const collection = context.collection("users"); -const documentKey = "my-document-key"; -const documentId = `${collection.name()}/${documentKey}`; -``` - -After: - -```js -const collection = context.collection("users"); -const documentKey = "my-document-key"; -const documentId = collection.documentId(documentKey); -``` - - -Client tools ------------- - -### Dump and restore all databases - -**arangodump** got an option `--all-databases` to make it dump all available databases -instead of just a single database specified via the option `--server.database`. - -When set to true, this makes arangodump dump all available databases the current -user has access to. The option `--all-databases` cannot be used in combination with -the option `--server.database`. - -When `--all-databases` is used, arangodump will create a subdirectory with the data -of each dumped database. Databases will be dumped one after the after. 
However, -inside each database, the collections of the database can be dumped in parallel -using multiple threads. -When dumping all databases, the consistency guarantees of arangodump are the same -as when dumping multiple single database individually, so the dump does not provide -cross-database consistency of the data. - -**arangorestore** got an option `--all-databases` to make it restore all databases from -inside the subdirectories of the specified dump directory, instead of just the -single database specified via the option `--server.database`. - -Using the option for arangorestore only makes sense for dumps created with arangodump -and the `--all-databases` option. As for arangodump, arangorestore cannot be invoked -with the both options `--all-databases` and `--server.database` at the same time. -Additionally, the option `--force-same-database` cannot be used together with -`--all-databases`. - -If the to-be-restored databases do not exist on the target server, then restoring data -into them will fail unless the option `--create-database` is also specified for -arangorestore. Please note that in this case a database user must be used that has -access to the `_system` database, in order to create the databases on restore. - -### Warning if connected to DBServer - -Under normal circumstances there should be no need to connect to a -database server in a cluster with one of the client tools, and it is -likely that any user operations carried out there with one of the client -tools may cause trouble. - -The client tools arangosh, arangodump and arangorestore will now emit -a warning when connecting with them to a database server node in a cluster. - -Startup option changes ----------------------- - -The value type of the hidden startup option `--rocksdb.recycle-log-file-num` has -been changed from numeric to boolean in ArangoDB 3.5, as the option is also a -boolean option in the underlying RocksDB library. - -Client configurations that use this configuration variable should adjust their -configuration and set this variable to a boolean value instead of to a numeric -value. - - -Miscellaneous -------------- - -### Improved overview of available program options - -The `--help-all` command-line option for all ArangoDB executables will now also -show all hidden program options. - -Previously hidden program options were only returned when invoking arangod or -a client tool with the cryptic `--help-.` option. Now `--help-all` simply retuns -them as well. - -### Fewer system collections - -The system collections `_frontend`, `_modules` and `_routing` are not created -anymore for new databases by default. - -`_modules` and `_routing` are only needed for legacy functionality. -Existing `_routing` collections will not be touched as they may contain user-defined -entries, and will continue to work. - -Existing `_modules` collections will also remain functional. - -The `_frontend` collection may still be required for actions triggered by the -web interface, but it will automatically be created lazily if needed. - -### Named indices - -Indices now have an additional `name` field, which allows for more useful -identifiers. System indices, like the primary and edge indices, have default -names (`primary` and `edge`, respectively). If no `name` value is specified -on index creation, one will be auto-generated (e.g. `idx_13820395`). The index -name _cannot_ be changed after index creation. No two indices on the same -collection may share the same name, but two indices on different collections -may. 
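
A short arangosh sketch (collection, field and index names are placeholders):

```js
// create an index with an explicit name ...
db.users.ensureIndex({ type: "hash", fields: ["email"], name: "byEmail" });

// ... and one without a name, which gets an auto-generated name such as "idx_13820395"
db.users.ensureIndex({ type: "skiplist", fields: ["age"] });
```
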
- -### ID values in log messages - -By default, ArangoDB and its client tools now show a 5 digit unique ID value in -any of their log messages, e.g. - - 2019-03-25T21:23:19Z [8144] INFO [cf3f4] ArangoDB (version 3.5.0 enterprise [linux]) is ready for business. Have fun!. - -In this message, the `cf3f4` is the message's unique ID value. ArangoDB users can -use this ID to build custom monitoring or alerting based on specific log ID values. -Existing log ID values are supposed to stay constant in future releases of arangod. - -Additionally the unique log ID values can be used by the ArangoDB support to find -out which component of the product exactly generated a log message. The IDs also -make disambiguation of identical log messages easier. - -The presence of these ID values in log messages may confuse custom log message filtering -or routing mechanisms that parse log messages and that rely on the old log message -format. - -This can be fixed adjusting any existing log message parsers and making them aware -of the ID values. The ID values are always 5 byte strings, consisting of the characters -`[0-9a-f]`. ID values are placed directly behind the log level (e.g. `INFO`). - -Alternatively, the log IDs can be suppressed in all log messages by setting the startup -option `--log.ids false` when starting arangod or any of the client tools. - - -Internal --------- - -We have moved from C++11 to C++14, which allows us to use some of the simplifications, -features and guarantees that this standard has in stock. -To compile ArangoDB from source, a compiler that supports C++14 is now required. - -The bundled JEMalloc memory allocator used in ArangoDB release packages has been -upgraded from version 5.0.1 to version 5.2.0. - -The bundled version of the RocksDB library has been upgraded from 5.16 to 6.0. 
diff --git a/Documentation/Books/Manual/ReleaseNotes/README.md b/Documentation/Books/Manual/ReleaseNotes/README.md deleted file mode 100644 index fa9b0f1fac63..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/README.md +++ /dev/null @@ -1,83 +0,0 @@ -Release Notes -============= - -What's New ----------- - -For a high level list of changes, please refer to one of the following sections: - -- What's New in 3.x: - [3.5](NewFeatures35.md), - [3.4](NewFeatures34.md), - [3.3](NewFeatures33.md), - [3.2](NewFeatures32.md), - [3.1](NewFeatures31.md), - [3.0](NewFeatures30.md) -- What's New in 2.x: - [2.8](NewFeatures28.md), - [2.7](NewFeatures27.md), - [2.6](NewFeatures26.md), - [2.5](NewFeatures25.md), - [2.4](NewFeatures24.md), - [2.3](NewFeatures23.md), - [2.2](NewFeatures22.md), - [2.1](NewFeatures21.md) - -Changelogs ----------- - -For a detailed list of changes to the ArangoDB core programs and tools, -please refer to the version specific changelogs: - -- Changelogs 3.x: - [3.4](https://raw.githubusercontent.com/arangodb/arangodb/3.4/CHANGELOG), - [3.3](https://raw.githubusercontent.com/arangodb/arangodb/3.3/CHANGELOG), - [3.2](https://raw.githubusercontent.com/arangodb/arangodb/3.2/CHANGELOG), - [3.1](https://raw.githubusercontent.com/arangodb/arangodb/3.1/CHANGELOG), - [3.0](https://raw.githubusercontent.com/arangodb/arangodb/3.0/CHANGELOG) -- Changelogs 2.x: - [2.8](https://raw.githubusercontent.com/arangodb/arangodb/2.8/CHANGELOG), - [2.7](https://raw.githubusercontent.com/arangodb/arangodb/2.7/CHANGELOG), - [2.6](https://raw.githubusercontent.com/arangodb/arangodb/2.6/CHANGELOG), - [2.5](https://raw.githubusercontent.com/arangodb/arangodb/2.5/CHANGELOG), - [2.4](https://raw.githubusercontent.com/arangodb/arangodb/2.4/CHANGELOG), - [2.3](https://raw.githubusercontent.com/arangodb/arangodb/2.3/CHANGELOG), - [2.2](https://raw.githubusercontent.com/arangodb/arangodb/2.2/CHANGELOG), - [2.1](https://raw.githubusercontent.com/arangodb/arangodb/2.1/CHANGELOG) - -Additional changelogs for tools not included in the main repository: - -- [ArangoDB Starter](https://github.com/arangodb-helper/arangodb/blob/master/CHANGELOG.md) -- [ArangoSync](https://github.com/arangodb/arangosync/blob/master/CHANGELOG) - -Incompatible changes --------------------- - -For a list of incompatible changes, please refer to one of the following sections. -Also see [Version Specific Upgrade Information](../Upgrading/VersionSpecific/README.md) -in the _Upgrading_ chapter. 
- -- Incompatible changes in 3.x: - [3.4](UpgradingChanges34.md), - [3.3](UpgradingChanges33.md), - [3.2](UpgradingChanges32.md), - [3.1](UpgradingChanges31.md), - [3.0](UpgradingChanges30.md) -- Incompatible changes in 2.x: - [2.8](UpgradingChanges28.md), - [2.7](UpgradingChanges27.md), - [2.6](UpgradingChanges26.md), - [2.5](UpgradingChanges25.md), - [2.4](UpgradingChanges24.md), - [2.3](UpgradingChanges23.md) - -Known Issues ------------- - -For a list of known issues, please refer to one of the following sections: - -- Known Issues in 3.x: - [3.5](KnownIssues35.md), - [3.4](KnownIssues34.md), - [3.3](KnownIssues33.md), - [3.2](KnownIssues32.md) diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges23.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges23.md deleted file mode 100644 index b7aab14b8c93..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges23.md +++ /dev/null @@ -1,331 +0,0 @@ -Incompatible changes in ArangoDB 2.3 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.3, and adjust any client programs if necessary. - - -Configuration file changes --------------------------- - -### Threads and contexts - -The number of server threads specified is now the minimum of threads -started. There are situation in which threads are waiting for results of -distributed database servers. In this case the number of threads is dynamically -increased. - -With ArangoDB 2.3, the number of server threads can be configured independently -of the number of V8 contexts. The configuration option -`--javascript.v8-contexts` was added to arangod to provide better control over -the number of V8 contexts created in arangod. - -Previously, the number of V8 contexts arangod created at startup was equal -to the number of server threads (as specified by option `--server.threads`). - -In some situations it may be more sensible to create different amounts of threads -and V8 contexts. This is because each V8 contexts created will consume memory -and requires CPU resources for periodic garbage collection. Contrary, server -threads do not have such high memory or CPU footprint. - -If the option `--javascript.v8-contexts` is not specified, the number of V8 -contexts created at startup will remain equal to the number of server threads. -Thus no change in configuration is required to keep the same behavior as in -previous ArangoDB versions. - -If you are using the default config files or merge them with your local config -files, please review if the default number of server threads is okay in your -environment. Additionally you should verify that the number of V8 contexts -created (as specified in option `--javascript.v8-contexts`) is okay. - -### Syslog - -The command-line option `--log.syslog` was used in previous versions of -ArangoDB to turn logging to syslog on or off: when setting to a non-empty -string, syslog logging was turned on, otherwise turned off. -When syslog logging was turned on, logging was done with the application -name specified in `--log.application`, which defaulted to `triagens`. -There was also a command-line option `--log.hostname` which could be set -but did not have any effect. - -This behavior turned out to be unintuitive and was changed in 2.3 as follows: - -* the command-line option `--log.syslog` is deprecated and does not have any - effect when starting ArangoDB. 
-* to turn on syslog logging in 2.3, the option `--log.facility` has to be set - to a non-empty string. The value for `facility` is OS-dependent (possible - values can be found in `/usr/include/syslog.h` or the like - `user` should - be available on many systems). -* the default value for `--log.application` has been changed from `triagens` to - `arangod`. -* the command-line option `--log.hostname` is deprecated and does not have any - effect when starting ArangoDB. Instead, the host name will be set by syslog - automatically. -* when logging to syslog, ArangoDB now omits the datetime prefix and the process - id, because they'll be added by syslog automatically. - - -AQL ---- - -### AQL queries throw less exceptions - -ArangoDB 2.3 contains a completely rewritten AQL query optimizer and execution -engine. This means that AQL queries will be executed with a different engine than -in ArangoDB 2.2 and earlier. Parts of AQL queries might be executed in different -order than before because the AQL optimizer has more freedom to move things -around in a query. - -In previous versions of ArangoDB, AQL queries aborted with an exception in many -situations and threw a runtime exception. Exceptions were thrown when trying to -find a value using the `IN` operator in a non-array element, when trying to use -non-boolean values with the logical operands `&&` or `||` or `!`, when using non-numeric -values in arithmetic operations, when passing wrong parameters into functions etc. - -In ArangoDB 2.3 this has been changed in many cases to make AQL more user-friendly -and to allow the optimization to perform much more query optimizations. - -Here is a summary of changes: -- when a non-array value is used on the right-hand side of the `IN` operator, the - result will be `false` in ArangoDB 2.3, and no exception will be thrown. -- the boolean operators `&&` and `||` do not throw in ArangoDB 2.3 if any of the - operands is not a boolean value. Instead, they will perform an implicit cast of - the values to booleans. Their result will be as follows: - - `lhs && rhs` will return `lhs` if it is `false` or would be `false` when converted - into a boolean. If `lhs` is `true` or would be `true` when converted to a boolean, - `rhs` will be returned. - - `lhs || rhs` will return `lhs` if it is `true` or would be `true` when converted - into a boolean. If `lhs` is `false` or would be `false` when converted to a boolean, - `rhs` will be returned. - - `! value` will return the negated value of `value` converted into a boolean -- the arithmetic operators (`+`, `-`, `*`, `/`, `%`) can be applied to any value and - will not throw exceptions when applied to non-numeric values. Instead, any value used - in these operators will be casted to a numeric value implicitly. If no numeric result - can be produced by an arithmetic operator, it will return `null` in ArangoDB 2.3. This - is also true for division by zero. -- passing arguments of invalid types into AQL functions does not throw a runtime - exception in most cases, but may produce runtime warnings. Built-in AQL functions that - receive invalid arguments will then return `null`. - - -### Nested FOR loop execution order - -The query optimizer in 2.3 may permute the order of nested `FOR` loops in AQL queries, -provided that exchanging the loops will not alter a query result. However, a change -in the order of returned values is allowed because no sort order is guaranteed by AQL -(and was never) unless an explicit `SORT` statement is used in a query. 
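
To illustrate the relaxed operator and arithmetic semantics described above, a small
arangosh sketch (the expressions are just examples):

```js
// implicit casts instead of runtime exceptions, following the rules above
db._query("RETURN [ 1 && 2, 0 || 'fallback', !'' ]").toArray();
// => [ [ 2, "fallback", true ] ]

// arithmetic with non-numeric operands or division by zero no longer throws;
// if no numeric result can be produced, the result is null
db._query("RETURN 1 / 0").toArray();
// => [ null ]
```
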
- - -### Changed return values of ArangoQueryCursor.getExtra() - -The return value of `ArangoQueryCursor.getExtra()` has been changed in ArangoDB 2.3. -It now contains a `stats` attribute with statistics about the query previously executed. -It also contains a `warnings` attribute with warnings that happened during query -execution. The return value structure has been unified in 2.3 for both read-only and -data-modification queries. - -The return value looks like this for a read-only query: - -``` -arangosh> stmt = db._createStatement("FOR i IN mycollection RETURN i"); stmt.execute().getExtra() -{ - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 2600, - "scannedIndex" : 0 - }, - "warnings" : [ ] -} -``` - -For data-modification queries, ArangoDB 2.3 returns a result with the same structure: - -``` -arangosh> stmt = db._createStatement("FOR i IN xx REMOVE i IN xx"); stmt.execute().getExtra() -{ - "stats" : { - "writesExecuted" : 2600, - "writesIgnored" : 0, - "scannedFull" : 2600, - "scannedIndex" : 0 - }, - "warnings" : [ ] -} -``` - -In ArangoDB 2.2, the return value of `ArangoQueryCursor.getExtra()` was empty for read-only -queries and contained an attribute `operations` with two sub-attributes for data-modification -queries: - -``` -arangosh> stmt = db._createStatement("FOR i IN mycollection RETURN i"); stmt.execute().getExtra() -{ -} -``` - -``` -arangosh> stmt = db._createStatement("FOR i IN mycollection REMOVE i IN mycollection"); stmt.execute().getExtra() -{ - "operations" : { - "executed" : 2600, - "ignored" : 0 - } -} -``` - -### Changed return values in HTTP method `POST /_api/cursor` - -The previously mentioned change also leads to the statistics being returned in the -HTTP REST API method `POST /_api/cursor`. Previously, the return value contained -an optional `extra` attribute that was filled only for data-modification queries and in -some other cases as follows: - -``` -{ - "result" : [ ], - "hasMore" : false, - "extra" : { - "operations" : { - "executed" : 2600, - "ignored" : 0 - } - } -} -``` - -With the changed result structure in ArangoDB 2.3, the `extra` attribute in the result -will look like this: - -``` -{ - "result" : [], - "hasMore" : false, - "extra" : { - "stats" : { - "writesExecuted" : 2600, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0 - }, - "warnings" : [ ] - } -} -``` - -If the query option `fullCount` is requested, the `fullCount` result value will also -be returned inside the `stats` attribute of the `extra` attribute, and not directly -as an attribute inside the `extra` attribute as in 2.2. Note that a `fullCount` will -only be present in `extra`.`stats` if it was requested as an option for the query. - -The result in ArangoDB 2.3 will also contain a `warnings` attribute with the array of -warnings that happened during query execution. - - -### Changed return values in ArangoStatement.explain() - -The return value of `ArangoStatement.explain()` has changed significantly in -ArangoDB 2.3. The new return value structure is not compatible with the structure -returned by 2.2. - -In ArangoDB 2.3, the full execution plan for an AQL query is returned alongside all -applied optimizer rules, optimization warnings etc. It is also possible to have the -optimizer return all execution plans. This required a new data structure. - -Client programs that use `ArangoStatement.explain()` or the HTTP REST API method -`POST /_api/explain` may need to be adjusted to use the new return format. 
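
A minimal arangosh sketch of obtaining the new explain output (the collection name is a
placeholder):

```js
// explain a query; in 2.3 the result contains the full execution plan
// along with the applied optimizer rules and any warnings
var stmt = db._createStatement("FOR doc IN mycollection RETURN doc");
var explanation = stmt.explain();

// list the top-level attributes of the new result structure
print(Object.keys(explanation));
```
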
- - -The return value of `ArangoStatement.parse()` has been extended in ArangoDB 2.3. -In addition to the existing attributes, ArangoDB 2.3 will also return an `ast` attribute -containing the abstract syntax tree of the statement. This extra attribute can -safely be ignored by client programs. - - -### Variables not updatable in queries - -Previous versions of ArangoDB allowed the modification of variables inside AQL -queries, e.g. - -``` -LET counter = 0 -FOR i IN 1..10 - LET counter = counter + 1 - RETURN counter -``` - -While this is admittedly a convenient feature, the new query optimizer design did not -allow to keep it. Additionally, updating variables inside a query would prevent a lot -of optimizations to queries that we would like the optimizer to make. Additionally, -updating variables in queries that run on different nodes in a cluster would like cause -non-deterministic behavior because queries are not executed linearly. - - -### Changed return value of `TO_BOOL` - -The AQL function `TO_BOOL` now always returns *true* if its argument is an array or an object. -In previous versions of ArangoDB, the function returned *false* for empty arrays or for -objects without attributes. - - -### Changed return value of `TO_NUMBER` - -The AQL function `TO_NUMBER` now returns *null* if its argument is an object or an -array with more than one member. In previous version of ArangoDB, the return -value in these cases was 0. `TO_NUMBER` will return 0 for empty array, and the numeric -equivalent of the array member's value for arrays with a single member. - - -### New AQL keywords - -The following keywords have been added to AQL in ArangoDB 2.3: - -- *NOT* -- *AND* -- *OR* - -Unquoted usage of these keywords for attribute names in AQL queries will likely -fail in ArangoDB 2.3. If any such attribute name needs to be used in a query, it -should be enclosed in backticks to indicate the usage of a literal attribute -name. - - -Removed features ----------------- - -### Bitarray indexes - -Bitarray indexes were only half-way documented and integrated in previous versions -of ArangoDB so their benefit was limited. The support for bitarray indexes has -thus been removed in ArangoDB 2.3. It is not possible to create indexes of type -"bitarray" with ArangoDB 2.3. - -When a collection is opened that contains a bitarray index definition created -with a previous version of ArangoDB, ArangoDB will ignore it and log the following -warning: - - index type 'bitarray' is not supported in this version of ArangoDB and is ignored - -Future versions of ArangoDB may automatically remove such index definitions so the -warnings will eventually disappear. - - -### Other removed features - -The HTTP REST API method at `POST /_admin/modules/flush` has been removed. - - -Known issues ------------- - -In ArangoDB 2.3.0, AQL queries containing filter conditions with an IN expression -will not yet use an index: - - FOR doc IN collection FILTER doc.indexedAttribute IN [ ... ] RETURN doc - - FOR doc IN collection - FILTER doc.indexedAttribute IN [ ... ] - RETURN doc - -We’re currently working on getting the IN optimizations done, and will ship them in -a 2.3 maintenance release soon (e.g. 2.3.1 or 2.3.2). 
diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges24.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges24.md deleted file mode 100644 index 73265321d6b3..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges24.md +++ /dev/null @@ -1,193 +0,0 @@ -Incompatible changes in ArangoDB 2.4 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.4, and adjust any client programs if necessary. - - -Changed behavior ----------------- - -### V8 upgrade - -The bundled V8 version has been upgraded from 3.16.14 to 3.29.59. - -The new version provides better error checking, which can lead to subtle changes -in the execution of JavaScript code. - -The following code, though nonsense, runs without error in 2.3 and 2.4 when -strict mode is not enabled: - - (function () { - a = true; - a.foo = 1; - })(); - -When enabling strict mode, the function will throw an error in 2.4 but not in 2.3: - - (function () { - "use strict"; - a = true; - a.foo = 1; - })(); - - TypeError: Cannot assign to read only property 'foo' of true - -Though this is a change in behavior it can be considered an improvement. The new version actually -uncovers an error that went undetected in the old version. - -Error messages have also changed slightly in the new version. Applications that -rely on the exact error messages of the JavaScript engine may need to be adjusted so they -look for the updated error messages. - -### Default endpoint - -The default endpoint for arangod is now `127.0.0.1`. - -This change will modify the IP address ArangoDB listens on to 127.0.0.1 by default. -This will make new ArangoDB installations unaccessible from clients other than -localhost unless the configuration is changed. This is a security feature. - -To make ArangoDB accessible from any client, change the server's configuration -(`--server.endpoint`) to either `tcp://0.0.0.0:8529` or the server's publicly -visible IP address. - -### Replication - -System collections are now included in the replication and all replication API return -values by default. - -This will lead to user accounts and credentials data being replicated from master to -slave servers. This may overwrite slave-specific database users. - -This may be considered a feature or an anti-feature, so it is configurable. - -If replication of system collections is undesired, they can be excluded from replication -by setting the `includeSystem` attribute to `false` in the following commands: - -* initial synchronization: `replication.sync({ includeSystem: false })` -* continuous replication: `replication.applier.properties({ includeSystem: false })` - -This will exclude all system collections (including `_aqlfunctions`, `_graphs` etc.) -from the initial synchronization and the continuous replication. - -If this is also undesired, it is also possible to specify a list of collections to -exclude from the initial synchronization and the continuous replication using the -`restrictCollections` attribute, e.g.: - - require("org/arangodb/replication").applier.properties({ - includeSystem: true, - restrictType: "exclude", - restrictCollections: [ "_users", "_graphs", "foo" ] - }); - -The above example will in general include system collections, but will exclude the -specified three collections from continuous replication. 
- -The HTTP REST API methods for fetching the replication inventory and for dumping -collections also support the `includeSystem` control flag via a URL parameter of -the same name. - -Build process changes ---------------------- - -Several options for the `configure` command have been removed in 2.4. The options - -* `--enable-all-in-one-v8` -* `--enable-all-in-one-icu` -* `--enable-all-in-one-libev` -* `--with-libev=DIR` -* `--with-libev-lib=DIR` -* `--with-v8=DIR` -* `--with-v8-lib=DIR` -* `--with-icu-config=FILE` - -are not available anymore because the build process will always use the bundled -versions of the libraries. - -When building ArangoDB from source in a directory that already contained a pre-2.4 -version, it will be necessary to run a `make superclean` command once and a full -rebuild afterwards: - - git pull - make superclean - make setup - ./configure - make - -Miscellaneous changes ---------------------- - -As a consequence of global renaming in the codebase, the option `mergeArrays` has -been renamed to `mergeObjects`. This option controls whether JSON objects will be -merged on an update operation or overwritten. The default has been, and still is, -to merge. Not specifying the parameter will lead to a merge, as it has been the -behavior in ArangoDB ever since. - -This affects the HTTP REST API method PATCH `/_api/document/collection/key`. Its -optional URL parameter `mergeArrays` for the option has been renamed to `mergeObjects`. - -The AQL `UPDATE` statement is also affected, as its option `mergeArrays` has also -been renamed to `mergeObjects`. The 2.3 query - - UPDATE doc IN collection WITH { ... } IN collection OPTIONS { mergeArrays: false } - -should thus be rewritten to the following in 2.4: - - UPDATE doc IN collection WITH { ... } IN collection OPTIONS { mergeObjects: false } - - -Deprecated features -------------------- - -For `FoxxController` objects, the method `collection()` is deprecated and will be -removed in future version of ArangoDB. Using this method will issue a warning. -Please use `applicationContext.collection()` instead. - -For `FoxxRepository` objects, the property `modelPrototype` is now deprecated. -Using it will issue a warning. Please use `FoxxRepository.model` instead. - -In `FoxxController` / `RequestContext`, calling method `bodyParam()` with three -arguments is deprecated. Please use `.bodyParam(paramName, options)` instead. - -In `FoxxController` / `RequestContext` calling method `queryParam({type: string})` -is deprecated. Please use `requestContext.queryParam({type: joi})` instead. - -In `FoxxController` / `RequestContext` calling method `pathParam({type: string})` -is deprecated. Please use `requestContext.pathParam({type: joi})` instead. - -For `FoxxModel`, calling `Model.extend({}, {attributes: {}})` is deprecated. -Please use `Model.extend({schema: {}})` instead. - -In module `org/arangodb/general-graph`, the functions `_undirectedRelation()` -and `_directedRelation()` are deprecated and will be removed in a future version -of ArangoDB. Both functions have been unified to `_relation()`. - -The modules `org/arangodb/graph` and `org/arangodb/graph-blueprint` are deprecated. -Please use module `org/arangodb/general-graph` instead. - -The HTTP REST API `_api/graph` and all its methods are deprecated. Please use -the general graph API `_api/gharial` instead. 
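For the graph-related deprecations above, a short sketch of the unified `_relation()` helper; the graph and collection names are invented for illustration:

```js
var graphModule = require("org/arangodb/general-graph");

// _relation() replaces both _directedRelation() and _undirectedRelation()
var rel = graphModule._relation("knows", ["persons"], ["persons"]);

// create a graph using the relation (example names only)
var graph = graphModule._create("social", [rel]);
```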
- - -Removed features ----------------- - -The following replication-related JavaScript methods became obsolete in ArangoDB -2.2 and have been removed in ArangoDB 2.4: - -* `require("org/arangodb/replication").logger.start()` -* `require("org/arangodb/replication").logger.stop()` -* `require("org/arangodb/replication").logger.properties()` - -The REST API methods for these functions have also been removed in ArangoDB 2.4: - -* HTTP PUT `/_api/replication/logger-start` -* HTTP PUT `/_api/replication/logger-stop` -* HTTP GET `/_api/replication/logger-config` -* HTTP PUT `/_api/replication/logger-config` - -Client applications that call one of these methods should be adjusted by removing -the calls to these methods. This shouldn't be problematic as these methods have -been no-ops since ArangoDB 2.2 anyway. - diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges25.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges25.md deleted file mode 100644 index 1c2a4c602d84..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges25.md +++ /dev/null @@ -1,159 +0,0 @@ -Incompatible changes in ArangoDB 2.5 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.5, and adjust any client programs if necessary. - - -Changed behavior ----------------- - -### V8 - -The V8 version shipped with ArangoDB was upgraded from 3.29.59 to 3.31.74.1. -This leads to additional ECMAScript 6 (ES6 or "harmony") features being enabled by -default in ArangoDB's scripting environment. - -Apart from that, a change in the interpretation of command-line options by V8 may -affect users. ArangoDB passes the value of the command-line option `--javascript.v8-options` -to V8 and leaves interpretation of the contents to V8. For example, the ArangoDB option -`--javascript.v8-options="--harmony"` could be used to tell V8 to enable its harmony -features. - -In ArangoDB 2.4, the following harmony options were made available by V8: - -* --harmony_scoping (enable harmony block scoping) -* --harmony_modules (enable harmony modules (implies block scoping)) -* --harmony_proxies (enable harmony proxies) -* --harmony_generators (enable harmony generators) -* --harmony_numeric_literals (enable harmony numeric literals (0o77, 0b11)) -* --harmony_strings (enable harmony string) -* --harmony_arrays (enable harmony arrays) -* --harmony_arrow_functions (enable harmony arrow functions) -* --harmony_classes (enable harmony classes) -* --harmony_object_literals (enable harmony object literal extensions) -* --harmony (enable all harmony features (except proxies)) - -There was the option `--harmony`, which turned on almost all harmony features. 
- -In ArangoDB 2.5, V8 provides the following harmony-related options: - -* --harmony (enable all completed harmony features) -* --harmony_shipping (enable all shipped harmony features) -* --harmony_modules (enable "harmony modules (implies block scoping)" (in progress)) -* --harmony_arrays (enable "harmony array methods" (in progress)) -* --harmony_array_includes (enable "harmony Array.prototype.includes" (in progress)) -* --harmony_regexps (enable "harmony regular expression extensions" (in progress)) -* --harmony_arrow_functions (enable "harmony arrow functions" (in progress)) -* --harmony_proxies (enable "harmony proxies" (in progress)) -* --harmony_sloppy (enable "harmony features in sloppy mode" (in progress)) -* --harmony_unicode (enable "harmony unicode escapes" (in progress)) -* --harmony_tostring (enable "harmony toString") -* --harmony_numeric_literals (enable "harmony numeric literals") -* --harmony_strings (enable "harmony string methods") -* --harmony_scoping (enable "harmony block scoping") -* --harmony_classes (enable "harmony classes (implies block scoping & object literal extension)") -* --harmony_object_literals (enable "harmony object literal extensions") -* --harmony_templates (enable "harmony template literals") - -Note that there are extra options for better controlling the dedicated features, -and especially that the meaning of the `--harmony` option has changed from enabling -**all** harmony features to **all completed** harmony features! - -Users should adjust the value of `--javascript.v8-options` accordingly. - -Please note that incomplete harmony features are subject to change in future V8 releases. - - -### Sparse indexes - -Hash indexes and skiplist indexes can now be created in a sparse variant. -When not explicitly set, the `sparse` attribute defaults to `false` for new indexes. - -This causes a change in behavior when creating a unique hash index without specifying the -sparse flag. The unique hash index will be created in a non-sparse variant in ArangoDB 2.5. - -In 2.4 and before, unique hash indexes were implicitly sparse, always excluding `null` values -from the index. There was no option to control this behavior, and sparsity was neither supported -for non-unique hash indexes nor skiplists in 2.4. This implicit sparsity of just unique hash -indexes was considered an inconsistency, and therefore the behavior was cleaned up in 2.5. - -As of 2.5, hash and skiplist indexes will only be created sparse if sparsity is explicitly requested. -This may require a change in index-creating client code, but only if the client code creates -unique hash indexes and if they are still intended to be sparse. In this case, the client code -should explicitly set the `sparse` flag to `true` when creating a unique hash index. - -Existing unique hash indexes from 2.4 or before will automatically be migrated so they are still -sparse after the upgrade to 2.5. For these indexes, the `sparse` attribute will be populated -automatically with a value of `true`. - -Geo indexes are implicitly sparse, meaning documents without the indexed location attribute or -containing invalid location coordinate values will be excluded from the index automatically. This -is also a change when compared to pre-2.5 behavior, when documents with missing or invalid -coordinate values may have caused errors on insertion when the geo index' `unique` flag was set -and its `ignoreNull` flag was not. - -This was confusing and has been rectified in 2.5. 
The method `ensureGeoConstraint()` now does the -same as `ensureGeoIndex()`. Furthermore, the attributes `constraint`, `unique`, `ignoreNull` and -`sparse` flags are now completely ignored when creating geo indexes. Client index creation code -therefore does not need to set the `ignoreNull` or `constraint` attributes when creating a geo -index. - -The same is true for fulltext indexes. There is no need to specify non-uniqueness or sparsity for -geo or fulltext indexes. They will always be non-unique and sparse. - - -### Moved Foxx applications to a different folder. - -Until 2.4 Foxx apps were stored in the following folder structure: -`/databases//:`. -This caused some trouble as apps where cached based on name and version and updates did not apply. -Also the path on filesystem and the app's access URL had no relation to one another. -Now the path on filesystem is identical to the URL (except the appended APP): -`/_db///APP` - -### Foxx Development mode - -The development mode used until 2.4 is gone. It has been replaced by a much more mature version. -This includes the deprecation of the javascript.dev-app-path parameter, which is useless since 2.5. -Instead of having two separate app directories for production and development, apps now reside in -one place, which is used for production as well as for development. -Apps can still be put into development mode, changing their behavior compared to production mode. -Development mode apps are still reread from disk at every request, and still they ship more debug -output. - -This change has also made the startup options `--javascript.frontend-development-mode` and -`--javascript.dev-app-path` obsolete. The former option will not have any effect when set, and the -latter option is only read and used during the upgrade to 2.5 and does not have any effects later. - -### Foxx install process - -Installing Foxx apps has been a two step process: import them into ArangoDB and mount them at a -specific mount point. These operations have been joined together. You can install an app at one -mount point, that's it. No fetch, mount, unmount, purge cycle anymore. The commands have been -simplified to just: - -* install: get your Foxx app up and running -* uninstall: shut it down and erase it from disk -Deprecated features -------------------- - -* Foxx: method `Model#toJSONSchema(id)` is deprecated, it will raise a warning if you use it. Please use `Foxx.toJSONSchema(id, model)` instead. - -Removed features ----------------- - -* Startup switch `--javascript.frontend-development-mode`: Its major purpose was internal development -anyway. Now the web frontend can be set to development mode similar to any other Foxx app. -* Startup switch `--javascript.dev-app-path`: Was used for the development mode of Foxx. This is -integrated with the normal app-path now and can be triggered on app level. The second app-path is -superfluous. -* Foxx: `controller.collection`: Please use `appContext.collection` instead. -* Foxx: `FoxxRepository.modelPrototype`: Please use `FoxxRepository.model` instead. -* Foxx: `Model.extend({}, {attributes: {}})`: Please use `Model.extend({schema: {}})` instead. -* Foxx: `requestContext.bodyParam(paramName, description, Model)`: Please use `requestContext.bodyParam(paramName, options)` instead. -* Foxx: `requestContext.queryParam({type: string})`: Please use `requestContext.queryParam({type: joi})` instead. -* Foxx: `requestContext.pathParam({type: string})`: Please use `requestContext.pathParam({type: joi})` instead. 
-* Graph: The modules `org/arangodb/graph` and `org/arangodb/graph-blueprint`: Please use module `org/arangodb/general-graph` instead. NOTE: This does not mean we do not support blueprints any more. General graph covers everything the graph--blueprint did, plus many more features. -* General-Graph: In the module `org/arangodb/general-graph` the functions `_undirectedRelation` and `_directedRelation` are no longer available. Both functions have been unified to `_relation`. - diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges26.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges26.md deleted file mode 100644 index 7ae75566869d..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges26.md +++ /dev/null @@ -1,365 +0,0 @@ -Incompatible changes in ArangoDB 2.6 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.6, and adjust any client programs if necessary. - -Requirements ------------- - -ArangoDB's built-in web interface now uses cookies for session management. -Session information ids are stored in cookies, so clients using the web interface must -accept cookies in order to log in and use it. - -Foxx changes ------------- - -### Foxx Queues - -Foxx Queue job type definitions were previously based on functions and had to be registered before use. Due to changes in 2.5 this resulted in problems when restarting the server or defining job types incorrectly. - -Function-based job types have been deprecated in 2.6 and will be removed entirely in 2.7. - -In order to convert existing function-based job types to the new script-based job types, create custom scripts in your Foxx app and reference them by their name and the mount point of the app they are defined in. Official job types from the Foxx app store can be upgraded by upgrading from the 1.x version to the 2.x version of the same app. - -In order to upgrade queued jobs to the new job types, you need to update the `type` property of the affected jobs in the database's `_jobs` system collection. In order to see the collection in the web interface you need to enable the collection type "System" in the collection list options. - -Example: - -Before: `"type": "mailer.postmark"` - -After: `"type": {"name": "mailer", "mount": "/my-postmark-mailer"}` - -### Foxx Sessions - -The options `jwt` and `type` of the controller method `controller.activateSessions` have been deprecated in 2.6 and will be removed entirely in 2.7. - -If you want to use pure JWT sessions, you can use the `sessions-jwt` Foxx app from the Foxx app store. - -If you want to use your own JWT-based sessions, you can use the JWT functions in the `crypto` module directly. - -Instead of using the `type` option you can just use the `cookie` and `header` options on their own, which both now accept the value `true` to enable them with their default configurations. - -The option `sessionStorageApp` has been renamed to `sessionStorage` and now also accepts session storages directly. The old option `sessionStorageApp` will be removed entirely in 2.7. - -### Libraries - -The bundled version of the `joi` library used in Foxx was upgraded to version 6.0.8. -This may affect Foxx applications that depend on the library. - -AQL changes ------------ - -### AQL LENGTH function - -The return value of the AQL `LENGTH` function was changed if `LENGTH` is applied on `null` or a -boolean value: - -* `LENGTH(null)` now returns `0`. 
In previous versions of ArangoDB, this returned `4`. - -* `LENGTH(false)` now returns `0`. In previous versions of ArangoDB, the return value was `5`. - -* `LENGTH(true)` now returns `1`. In previous versions of ArangoDB, the return value was `4`. - -### AQL graph functions - -In 2.6 the graph functions did undergo a performance lifting. -During this process we had to adopt the result format and the options for some of them. -Many graph functions now have an option `includeData` which allows to trigger -if the result of this function should contain fully extracted documents `includeData: true` -or only the `_id` values `includeData: false`. -In most use cases the `_id` is sufficient to continue and the extraction of data is an unnecessary -operation. -The AQL functions supporting this additional option are: - -* SHORTEST_PATH -* NEIGHBORS -* GRAPH_SHORTEST_PATH -* GRAPH_NEIGHBORS -* GRAPH_EDGES - -Furthermore the result `SHORTEST_PATH` has changed. The old format returned a list of all vertices on the path. -Optionally it could include each sub-path for these vertices. -All of the documents were fully extracted. -Example: -``` -[ - { - vertex: { - _id: "vertex/1", - _key: "1", - _rev: "1234" - name: "Alice" - }, - path: { - vertices: [ - { - _id: "vertex/1", - _key: "1", - _rev: "1234" - name: "Alice" - } - ], - edges: [] - } - }, - { - vertex: { - _id: "vertex/2", - _key: "2", - _rev: "5678" - name: "Bob" - }, - path: { - vertices: [ - { - _id: "vertex/1", - _key: "1", - _rev: "1234" - name: "Alice" - }, { - _id: "vertex/2", - _key: "2", - _rev: "5678" - name: "Bob" - } - ], - edges: [ - { - _id: "edge/1", - _key: "1", - _rev: "9876", - type: "loves" - } - ] - } - } -] -``` - -The new version is more compact. -Each `SHORTEST_PATH` will only return one document having the attributes `vertices`, `edges`, `distance`. -The `distance` is computed taking into account the given weight. -Optionally the documents can be extracted with `includeData: true` -Example: -``` -{ - vertices: [ - "vertex/1", - "vertex/2" - ], - edges: [ - "edge/1" - ], - distance: 1 -} -``` - -The next function that returns a different format is `NEIGHBORS`. -Since 2.5 it returned an object with `edge` and `vertex` for each connected edge. -Example: -``` -[ - { - vertex: { - _id: "vertex/2", - _key: "2", - _rev: "5678" - name: "Bob" - }, - edge: { - _id: "edge/1", - _key: "1", - _rev: "9876", - type: "loves" - } - } -] -``` -With 2.6 it will only return the vertex directly, again using `includeData: true`. -By default it will return a distinct set of neighbors, using the option `distinct: false` -will include the same vertex for each edge pointing to it. - -Example: -``` -[ - "vertex/2" -] -``` - -Function and API changes ------------------------- - -### Graph measurements functions - -All graph measurements functions in JavaScript module `general-graph` that calculated a -single figure previously returned an array containing just the figure. Now these functions -will return the figure directly and not put it inside an array. - -The affected functions are: - -* `graph._absoluteEccentricity` -* `graph._eccentricity` -* `graph._absoluteCloseness` -* `graph._closeness` -* `graph._absoluteBetweenness` -* `graph._betweenness` -* `graph._radius` -* `graph._diameter` - -Client programs calling these functions should be adjusted so they process the scalar value -returned by the function instead of the previous array value. 
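Client code that previously unwrapped the single-element array can now use the return value directly. A sketch, assuming an existing graph named `social`:

```js
var graphModule = require("org/arangodb/general-graph");
var graph = graphModule._graph("social");  // assumed existing graph

// 2.5 and earlier returned something like [ 3 ];
// from 2.6 on the figure is returned directly
var radius = graph._radius();
```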
- -### Cursor API - -A batchSize value `0` is now disallowed when calling the cursor API via HTTP -`POST /_api/cursor`. - -The HTTP REST API `POST /_api/cursor` does not accept a `batchSize` parameter value of -`0` any longer. A batch size of 0 never made much sense, but previous versions of ArangoDB -did not check for this value. Now creating a cursor using a `batchSize` value 0 will -result in an HTTP 400 error response. - -### Document URLs returned - -The REST API method GET `/_api/document?collection=...` (that method will return partial URLs -to all documents in the collection) will now properly prefix document address URLs with the -current database name. - -Previous versions of ArangoDB returned the URLs starting with `/_api/` but without the current -database name, e.g. `/_api/document/mycollection/mykey`. Starting with 2.6, the response URLs -will include the database name as well, e.g. `/_db/_system/_api/document/mycollection/mykey`. - -### Fulltext indexing - -Fulltext indexes will now also index text values contained in direct sub-objects of the indexed -attribute. - -Previous versions of ArangoDB only indexed the attribute value if it was a string. Sub-attributes -of the index attribute were ignored when fulltext indexing. - -Now, if the index attribute value is an object, the object's values will each be included in the -fulltext index if they are strings. If the index attribute value is an array, the array's values -will each be included in the fulltext index if they are strings. - -Deprecated server functionality -------------------------------- - -### Simple queries - -The following simple query functions are now deprecated: - -* `collection.near` -* `collection.within` -* `collection.geo` -* `collection.fulltext` -* `collection.range` -* `collection.closedRange` - -This also lead to the following REST API methods being deprecated from now on: - -* `PUT /_api/simple/near` -* `PUT /_api/simple/within` -* `PUT /_api/simple/fulltext` -* `PUT /_api/simple/range` - -It is recommended to replace calls to these functions or APIs with equivalent AQL queries, -which are more flexible because they can be combined with other operations: - - FOR doc IN NEAR(@@collection, @latitude, @longitude, @limit) - RETURN doc - - FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius, @distanceAttributeName) - RETURN doc - - FOR doc IN FULLTEXT(@@collection, @attributeName, @queryString, @limit) - RETURN doc - - FOR doc IN @@collection - FILTER doc.value >= @left && doc.value < @right - LIMIT @skip, @limit - RETURN doc` - -The above simple query functions and REST API methods may be removed in future versions -of ArangoDB. - -Using negative values for `SimpleQuery.skip()` is also deprecated. -This functionality will be removed in future versions of ArangoDB. - -### AQL functions - -The AQL `SKIPLIST` function has been deprecated because it is obsolete. - -The function was introduced in older versions of ArangoDB with a less powerful query optimizer to -retrieve data from a skiplist index using a `LIMIT` clause. - -Since 2.3 the same goal can be achieved by using regular AQL constructs, e.g. - - FOR doc IN @@collection - FILTER doc.value >= @value - SORT doc.value - LIMIT 1 - RETURN doc - - -Startup option changes ----------------------- - -### Options added - -The following configuration options have been added in 2.6: - -* `--server.session-timeout`: allows controlling the timeout of user sessions in the web interface. - The value is specified in seconds. 
- -* `--server.foxx-queues`: controls whether the Foxx queue manager will check queue and job entries. - Disabling this option can reduce server load but will prevent jobs added to Foxx queues from - being processed at all. - - The default value is `true`, enabling the Foxx queues feature. - -* `--server.foxx-queues-poll-interval`: allows adjusting the frequency with which the Foxx queues - manager is checking the queue (or queues) for jobs to be executed. - - The default value is `1` second. Lowering this value will result in the queue manager waking - up and checking the queues more frequently, which may increase CPU usage of the server. - - Note: this option only has an effect when `--server.foxx-queues` is not set to `false`. - -### Options removed - -The following configuration options have been removed in 2.6.: - -* `--log.severity`: the docs for `--log.severity` mentioned lots of severities (e.g. - `exception`, `technical`, `functional`, `development`) but only a few severities (e.g. - `all`, `human`) were actually used, with `human` being the default and `all` enabling the - additional logging of incoming requests. - - The option pretended to control a lot of things which it actually didn't. Additionally, - the option `--log.requests-file` was around for a long time already, also controlling - request logging. - - Because the `--log.severity` option effectively did not control that much, it was removed. - A side effect of removing the option is that 2.5 installations started with option - `--log.severity all` will not log requests after the upgrade to 2.6. This can be adjusted - by setting the `--log.requests-file` option instead. - -### Default values changed - -The default values for the following options have changed in 2.6: - -* `--database.ignore-datafile-errors`: the default value for this option was changed from `true` - to `false`. - - If the new default value of `false` is used, then arangod will refuse loading collections that - contain datafiles with CRC mismatches or other errors. A collection with datafile errors will - then become unavailable. This prevents follow up errors from happening. - - The only way to access such collection is to use the datafile debugger (arango-dfdb) and try to - repair or truncate the datafile with it. - -* `--server.request-timeout`: the default value was increased from 300 to 1200 seconds for all - client tools (arangosh, arangoimp, arangodump, arangorestore). - -* `--server.connect-timeout`: the default value was increased from 3 to 5 seconds for all client - tools (arangosh, arangoimp, arangodump, arangorestore). diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md deleted file mode 100644 index 8d5207f7f0fe..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md +++ /dev/null @@ -1,243 +0,0 @@ -Incompatible changes in ArangoDB 2.7 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.7, and adjust any client programs if necessary. - - -AQL changes ------------ - -`DISTINCT` is now a keyword in AQL. - -AQL queries that use `DISTINCT` (in lower, upper or mixed case) as an identifier (i.e. as a -variable, a collection name or a function name) will stop working. To make such queries -working again, each occurrence of `DISTINCT` in an AQL query should be enclosed in backticks. 
-This will turn `DISTINCT` from a keyword into an identifier again. - -The AQL function `SKIPLIST()` has been removed in ArangoDB 2.7. This function was deprecated in -ArangoDB 2.6. It was a left-over from times when the query optimizer wasn't able to use skiplist -indexes together with filters, skip and limit values. Since this issue been fixed since version 2.3, -there is no AQL replacement function for `SKIPLIST`. Queries that use the `SKIPLIST` function -can be fixed by using the usual combination of `FOR`, `FILTER` and `LIMIT`, e.g. - -``` - FOR doc IN @@collection - FILTER doc.value >= @value - SORT doc.value DESC - LIMIT 1 - RETURN doc -``` - -Foxx changes ------------- - -### Bundling and compilation - -The `assets` property is no longer supported in Foxx manifests and is scheduled to be removed in a future version of ArangoDB. The `files` property can still be used to serve static assets but it is recommended to use separate tooling to compile and bundle your assets. - -### Manifest scripts - -The properties `setup` and `teardown` have been moved into the `scripts` property map: - -**Before:** - -```json -{ - ... - "setup": "scripts/setup.js", - "teardown": "scripts/teardown.js" -} -``` - -**After:** - -```json -{ - ... - "scripts": { - "setup": "scripts/setup.js", - "teardown": "scripts/teardown.js" - } -} -``` - -### Foxx Queues - -Function-based Foxx Queue job types are no longer supported. To learn about how you can use the new script-based job types [follow the updated recipe in the cookbook](https://docs.arangodb.com/2.8/Cookbook/FoxxQueues.html). - -### Foxx Sessions - -The `jwt` and `type` options have been removed from the `activateSessions` API. - -If you want to replicate the behavior of the `jwt` option you can use the JWT functions in the `crypto` module. A JWT-based session storage that doesn't write sessions to the database is available as the [sessions-jwt app](https://github.com/arangodb/foxx-sessions-jwt) in the Foxx app store. - -The session type is now inferred from the presence of the `cookie` or `header` options (allowing you to enable support for both). If you want to use the default settings for `cookie` or `header` you can pass the value `true` instead. - -The `sessionStorageApp` option has been removed in favour of the `sessionStorage` option. - -**Before:** - -```js -var Foxx = require('org/arangodb/foxx'); -var ctrl = new Foxx.Controller(applicationContext); - -ctrl.activateSessions({ - sessionStorageApp: 'some-sessions-app', - type: 'cookie' -}); -``` - -**After:** - -```js -ctrl.activateSessions({ - sessionStorage: applicationContext.dependencies.sessions.sessionStorage, - cookie: true -}); -``` - -### Request module - -The module `org/arangodb/request` uses an internal library function for sending HTTP -requests. This library functionally unconditionally set an HTTP header `Accept-Encoding: gzip` -in all outgoing HTTP requests, without client code having to set this header explicitly. - -This has been fixed in 2.7, so `Accept-Encoding: gzip` is not set automatically anymore. -Additionally the header `User-Agent: ArangoDB` is not set automatically either. If -client applications rely on these headers being sent, they are free to add it when -constructing requests using the request module. - -The `internal.download()` function is also affected by this change. Again, the header -can be added here if required by passing it via a `headers` sub-attribute in the -third parameter (`options`) to this function. 
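Applications that depended on the implicitly added headers can set them explicitly. A sketch with a placeholder URL:

```js
var request = require("org/arangodb/request");

// 2.7 no longer adds these headers automatically, so set them when needed
var response = request({
  method: "GET",
  url: "http://example.org/resource",  // placeholder URL
  headers: {
    "Accept-Encoding": "gzip",
    "User-Agent": "ArangoDB"
  }
});
```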
- - -arangodump / backups --------------------- - -The filenames in dumps created by arangodump now contain not only the name of the dumped -collection, but also an additional 32-digit hash value. This is done to prevent overwriting -dump files in case-insensitive file systems when there exist multiple collections with the -same name (but with different cases). - -This change leads to changed filenames in dumps created by arangodump. If any client -scripts depend on the filenames in the dump output directory being equal to the collection -name plus one of the suffixes `.structure.json` and `.data.json`, they need to be adjusted. - -Starting with ArangoDB 2.7, the file names will contain an underscore plus the 32-digit -MD5 value (represented in hexadecimal notation) of the collection name. - -For example, when arangodump dumps data of two collections *test* and *Test*, the -filenames in previous versions of ArangoDB were: - -* `test.structure.json` (definitions for collection *test*) -* `test.data.json` (data for collection *test*) -* `Test.structure.json` (definitions for collection *Test*) -* `Test.data.json` (data for collection *Test*) - -In 2.7, the filenames will be: - -* `test_098f6bcd4621d373cade4e832627b4f6.structure.json` (definitions for collection *test*) -* `test_098f6bcd4621d373cade4e832627b4f6.data.json` (data for collection *test*) -* `Test_0cbc6611f5540bd0809a388dc95a615b.structure.json` (definitions for collection *Test*) -* `Test_0cbc6611f5540bd0809a388dc95a615b.data.json` (data for collection *Test*) - - -Starting / stopping -------------------- - -When starting arangod, the server will now drop the process privileges to the -specified values in options `--server.uid` and `--server.gid` instantly after -parsing the startup options. - -That means when either `--server.uid` or `--server.gid` are set, the privilege -change will happen earlier. This may prevent binding the server to an endpoint -with a port number lower than 1024 if the arangodb user has no privileges -for that. Previous versions of ArangoDB changed the privileges later, so some -startup actions were still carried out under the invoking user (i.e. likely -*root* when started via init.d or system scripts) and especially binding to -low port numbers was still possible there. - -The default privileges for user *arangodb* will not be sufficient for binding -to port numbers lower than 1024. To have an ArangoDB 2.7 bind to a port number -lower than 1024, it needs to be started with either a different privileged user, -or the privileges of the *arangodb* user have to raised manually beforehand. - -Additionally, Linux startup scripts and systemd configuration for arangod now -will adjust the NOFILE (number of open files) limits for the process. The limit -value is set to 131072 (128k) when ArangoDB is started via start/stop commands. -The goal of this change is to prevent arangod from running out of available -file descriptors for socket connections and datafiles. - - -Connection handling -------------------- - -arangod will now actually close lingering client connections when idle for at least -the duration specified in the `--server.keep-alive-timeout` startup option. - -In previous versions of ArangoDB, idle connections were not closed by the server -when the timeout was reached and the client was still connected. Now the -connection is properly closed by the server in case of timeout. 
Client -applications relying on the old behavior may now need to reconnect to the -server when their idle connections time out and get closed (note: connections -being idle for a long time may be closed by the OS or firewalls anyway - -client applications should be aware of that and try to reconnect). - - -Option changes --------------- - -### Configure options removed - -The following options for `configure` have been removed because they were unused -or exotic: - -* `--enable-timings` -* `--enable-figures` - -### Startup options added - -The following configuration options have been added in 2.7: - -* `--database.query-cache-max-results`: sets the maximum number of results in AQL - query result cache per database -* `--database.query-cache-mode`: sets the mode for the AQL query results cache. - Possible values are `on`, `off` and `demand`. The default value is `off` - - -Miscellaneous changes ---------------------- - -### Simple queries - -Many simple queries provide a `skip()` function that can be used to skip over a certain number -of documents in the result. This function allowed specifying negative offsets in previous versions -of ArangoDB. Specifying a negative offset led to the query result being iterated in reverse order, -so skipping was performed from the back of the result. As most simple queries do not provide a -guaranteed result order, skipping from the back of a result with unspecific order seems a rather -exotic use case and was removed to increase consistency with AQL, which also does not provide -negative skip values. - -Negative skip values were deprecated in ArangoDB 2.6. - -### Tasks API - -The undocumented function `addJob()` has been removed from the `org/arangodb/tasks` module in -ArangoDB 2.7. - -### Runtime endpoints manipulation API - -The following HTTP REST API methods for runtime manipulation of server endpoints have been -removed in ArangoDB 2.7: - -* POST `/_api/endpoint`: to dynamically add an endpoint while the server was running -* DELETE `/_api/endpoint`: to dynamically remove an endpoint while the server was running - -This change also affects the equivalent JavaScript endpoint manipulation methods available -in Foxx. The following functions have been removed in ArangoDB 2.7: - -* `db._configureEndpoint()` -* `db._removeEndpoint()` - diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges28.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges28.md deleted file mode 100644 index 556a2cd3ea48..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges28.md +++ /dev/null @@ -1,209 +0,0 @@ -Incompatible changes in ArangoDB 2.8 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 2.8, and adjust any client programs if necessary. - - -AQL ---- - -### Keywords added - -The following AQL keywords were added in ArangoDB 2.8: - -* `GRAPH` -* `OUTBOUND` -* `INBOUND` -* `ANY` -* `ALL` -* `NONE` -* `AGGREGATE` - -Usage of these keywords for collection names, variable names or attribute names -in AQL queries will not be possible without quoting. For example, the following -AQL query will still work as it uses a quoted collection name and a quoted -attribute name: - -``` -FOR doc IN `OUTBOUND` - RETURN doc.`any` -``` - -### Changed behavior - -The AQL functions `NEAR` and `WITHIN` now have stricter validations -for their input parameters `limit`, `radius` and `distance`. 
They may now throw -exceptions when invalid parameters are passed that may have not led -to exceptions in previous versions. - - -Additionally, the expansion (`[*]`) operator in AQL has changed its behavior when -handling non-array values: - -In ArangoDB 2.8, calling the expansion operator on a non-array value will always -return an empty array. Previous versions of ArangoDB expanded non-array values by -calling the `TO_ARRAY()` function for the value, which for example returned an -array with a single value for boolean, numeric and string input values, and an array -with the object's values for an object input value. This behavior was inconsistent -with how the expansion operator works for the array indexes in 2.8, so the behavior -is now unified: - -- if the left-hand side operand of `[*]` is an array, the array will be returned as - is when calling `[*]` on it -- if the left-hand side operand of `[*]` is not an array, an empty array will be - returned by `[*]` - -AQL queries that rely on the old behavior can be changed by either calling `TO_ARRAY` -explicitly or by using the `[*]` at the correct position. - -The following example query will change its result in 2.8 compared to 2.7: - - LET values = "foo" RETURN values[*] - -In 2.7 the query has returned the array `[ "foo" ]`, but in 2.8 it will return an -empty array `[ ]`. To make it return the array `[ "foo" ]` again, an explicit -`TO_ARRAY` function call is needed in 2.8 (which in this case allows the removal -of the `[*]` operator altogether). This also works in 2.7: - - LET values = "foo" RETURN TO_ARRAY(values) - -Another example: - - LET values = [ { name: "foo" }, { name: "bar" } ] - RETURN values[*].name[*] - -The above returned `[ [ "foo" ], [ "bar" ] ] in 2.7. In 2.8 it will return -`[ [ ], [ ] ]`, because the value of `name` is not an array. To change the results -to the 2.7 style, the query can be changed to - - LET values = [ { name: "foo" }, { name: "bar" } ] - RETURN values[* RETURN TO_ARRAY(CURRENT.name)] - -The above also works in 2.7. -The following types of queries won't change: - - LET values = [ 1, 2, 3 ] RETURN values[*] - LET values = [ { name: "foo" }, { name: "bar" } ] RETURN values[*].name - LET values = [ { names: [ "foo", "bar" ] }, { names: [ "baz" ] } ] RETURN values[*].names[*] - LET values = [ { names: [ "foo", "bar" ] }, { names: [ "baz" ] } ] RETURN values[*].names[**] - - -### Deadlock handling - -Client applications should be prepared to handle error 29 (`deadlock detected`) -that ArangoDB may now throw when it detects a deadlock across multiple transactions. -When a client application receives error 29, it should retry the operation that -failed. - -The error can only occur for AQL queries or user transactions that involve -more than a single collection. - - -### Optimizer - -The AQL execution node type `IndexRangeNode` was replaced with a new more capable -execution node type `IndexNode`. That means in execution plan explain output there -will be no more `IndexRangeNode`s but only `IndexNode`. This affects explain output -that can be retrieved via `require("org/arangodb/aql/explainer").explain(query)`, -`db._explain(query)`, and the HTTP query explain API. - -The optimizer rule that makes AQL queries actually use indexes was also renamed -from `use-index-range` to `use-indexes`. Again this affects explain output -that can be retrieved via `require("org/arangodb/aql/explainer").explain(query)`, -`db._explain(query)`, and the HTTP query explain API. 
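To verify which rules a query uses after the rename, the explain facilities mentioned above can be consulted; a minimal sketch with a made-up query:

```js
var explainer = require("org/arangodb/aql/explainer");

// the list of applied rules should now contain "use-indexes" instead of "use-index-range"
explainer.explain("FOR doc IN mycollection FILTER doc.value == 1 RETURN doc");

// the same information is available via db._explain() and the HTTP explain API:
// db._explain("FOR doc IN mycollection FILTER doc.value == 1 RETURN doc");
```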
- -The query optimizer rule `remove-collect-into` was renamed to `remove-collect-variables`. -This affects explain output that can be retrieved via `require("org/arangodb/aql/explainer").explain(query)`, -`db._explain(query)`, and the HTTP query explain API. - - -HTTP API --------- - -When a server-side operation got canceled due to an explicit client cancel request -via HTTP `DELETE /_api/job`, previous versions of ArangoDB returned an HTTP status -code of 408 (request timeout) for the response of the canceled operation. - -The HTTP return code 408 has caused problems with some client applications. Some -browsers (e.g. Chrome) handled a 408 response by resending the original request, -which is the opposite of what is desired when a job should be canceled. - -Therefore ArangoDB will return HTTP status code 410 (gone) for canceled operations -from version 2.8 on. - - -Foxx ----- - -### Model and Repository - -Due to compatibility issues the Model and Repository types are no longer implemented as ES2015 classes. - -The pre-2.7 "extend" style subclassing is supported again and will not emit any deprecation warnings. - -```js -var Foxx = require('org/arangodb/foxx'); -var MyModel = Foxx.Model.extend({ - // ... - schema: {/* ... */} -}); -``` - -### Module resolution - -The behavior of the JavaScript module resolution used by the `require` function has -been modified to improve compatibility with modules written for Node.js. - -Specifically - -* absolute paths (e.g. `/some/absolute/path`) are now always interpreted as absolute - file system paths, relative to the file system root - -* global names (e.g. `global/name`) are now first intepreted as references to modules - residing in a relevant `node_modules` folder, a built-in module or a matching - document in the internal `_modules` collection, and only resolved to local file paths - if no other match is found - -Previously the two formats were treated interchangeably and would be resolved to local -file paths first, leading to problems when local files used the same names as other -modules (e.g. a local file `chai.js` would cause problems when trying to load the -`chai` module installed in `node_modules`). - -For more information see the [blog announcement of this change](https://www.arangodb.com/2015/11/foxx-module-resolution-will-change-in-2-8/) -and the [upgrade guide](../Upgrading/VersionSpecific/Upgrading28.md#upgrading-foxx-apps-generated-by-arangodb-27-and-earlier). - -### Module `org/arangodb/request` - -The module now always returns response bodies, even for error responses. In versions -prior to 2.8 the module would silently drop response bodies if the response header -indicated an error. - -The old behavior of not returning bodies for error responses can be restored by -explicitly setting the option `returnBodyOnError` to `false`: - -```js -let response = request({ - //... - returnBodyOnError: false -}); -``` - -### Garbage collection - -The V8 garbage collection strategy was slightly adjusted so that it eventually -happens in all V8 contexts that hold V8 external objects (references to ArangoDB -documents and collections). This enables a better cleanup of these resources and -prevents other processes such as compaction being stalled while waiting for these -resources to be released. - -In this context the default value for the JavaScript garbage collection frequency -(`--javascript.gc-frequency`) was also increased from 10 seconds to 15 seconds, -as less internal operations in ArangoDB are carried out in JavaScript. 
- -Client tools ------------- - -arangodump will now fail by default when trying to dump edges that -refer to already dropped collections. This can be circumvented by -specifying the option `--force true` when invoking arangodump diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md deleted file mode 100644 index 5242473870e9..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md +++ /dev/null @@ -1,1112 +0,0 @@ -Incompatible changes in ArangoDB 3.0 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 3.0, and adjust any client programs if necessary. - -Build system ------------- - -Building ArangoDB 3.0 from source now requires CMake. - -The pre-3.0 build system used a configure-based approach. The steps to build -ArangoDB 2.8 from source code were: - -``` -make setup -./configure -make -``` - -These steps will not work anymore, as ArangoDB 3.0 does not come with a -configure script. - -To build 3.0 on Linux, create a separate build directory first: - -``` -mkdir -p build -``` - -and then create the initial build scripts once using CMake: - -``` -(cd build && cmake ..) -``` - -The above command will configure the build and check for the required -dependencies. If everything works well the actual build can be started with - -``` -(cd build && make) -``` - -The binaries for the ArangoDB server and all client tools will then be created -inside the `build` directory. To start ArangoDB locally from the `build` directory, -use - -``` -build/bin/arangod -``` - -Datafiles and datafile names ----------------------------- - -ArangoDB 3.0 uses a new VelocyPack-based format for storing data in WAL logfiles -and collection datafiles. The file format is not compatible with the files used -in prior versions of ArangoDB. That means datafiles written by ArangoDB 3.0 cannot be -used in earlier versions and vice versa. - -The pattern for collection directory names was changed in 3.0 to include a random -id component at the end. The new pattern is `collection--`, where `` -is the collection id and `` is a random number. Previous versions of ArangoDB -used a pattern `collection-` without the random number. - -User Management ---------------- - -Unlike ArangoDB 2.x, ArangoDB 3.0 users are now separated from databases, and you can -grant one or more database permissions to a user. - -If you want to mimic the behavior of ArangoDB, you should name your users like -`username@dbname`. - -Users that can access the *_system* database are allowed to manage users and -permissions for all databases. - -Edges and edges attributes --------------------------- - -In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated -specially when loading or storing edges. That special handling led to these attributes -being not as flexible as regular document attributes. For example, the `_from` and -`_to` attribute values of an existing edge could not be updated once the edge was -created. Additionally, the `_from` and `_to` attributes could not be indexed in -user-defined indexes, e.g. to make each combination of `_from` and `_to` unique. -Finally, as `_from` and `_to` referenced the linked collections by collection id -and not by collection name, their meaning became unclear once a referenced collection -was dropped. 
The collection id stored in edges then became unusable, and when -accessing such edge the collection name part of it was always translated to `_undefined`. - -In ArangoDB 3.0, the `_from` and `_to` values of edges are saved as regular strings. -This allows using `_from` and `_to` in user-defined indexes. Additionally this allows -updating the `_from` and `_to` values of existing edges. Furthermore, collections -referenced by `_from` and `_to` values may be dropped and re-created later. Any -`_from` and `_to` values of edges pointing to such dropped collection are unaffected -by the drop operation now. Also note that renaming the collection referenced in -`_from` and `_to` in ArangoDB 2.8 also relinked the edges. In 3.0 the edges are NOT -automatically relinked to the new collection anymore. - -Documents ---------- - -Documents (in contrast to edges) cannot contain the attributes `_from` or `_to` on the -main level in ArangoDB 3.0. These attributes will be automatically removed when saving -documents (i.e. non-edges). `_from` and `_to` can be still used in sub-objects inside -documents. - -The `_from` and `_to` attributes will of course be preserved and are still required when -saving edges. - -AQL ---- - -### Edges handling - -When updating or replacing edges via AQL, any modifications to the `_from` and `_to` -attributes of edges were ignored by previous versions of ArangoDB, without signaling -any errors. This was due to the `_from` and `_to` attributes being immutable in earlier -versions of ArangoDB. - -From 3.0 on, the `_from` and `_to` attributes of edges are mutable, so any AQL queries that -modify the `_from` or `_to` attribute values of edges will attempt to actually change these -attributes. Clients should be aware of this change and should review their queries that -modify edges to rule out unintended side-effects. - -Additionally, when completely replacing the data of existing edges via the AQL `REPLACE` -operation, it is now required to specify values for the `_from` and `_to` attributes, -as `REPLACE` requires the entire new document to be specified. If either `_from` or `_to` -are missing from the replacement document, an `REPLACE` operation will fail. - -#### Graph functions - -In version 3.0 all former graph related functions have been removed from AQL to -be replaced by [native AQL constructs](../../AQL/Graphs/index.html). -These constructs allow for more fine-grained filtering on several graph levels. -Also this allows the AQL optimizer to automatically improve these queries by -enhancing them with appropriate indexes. -We have created recipes to upgrade from 2.8 to 3.0 when using these functions. 
- -The functions: - -* GRAPH_COMMON_NEIGHBORS -* GRAPH_COMMON_PROPERTIES -* GRAPH_DISTANCE_TO -* GRAPH_EDGES -* GRAPH_NEIGHBORS -* GRAPH_TRAVERSAL -* GRAPH_TRAVERSAL_TREE -* GRAPH_SHORTEST_PATH -* GRAPH_PATHS -* GRAPH_VERTICES - -are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingGraphFunctionsTo3.html) - -* GRAPH_ABSOLUTE_BETWEENNESS -* GRAPH_ABSOLUTE_CLOSENESS -* GRAPH_ABSOLUTE_ECCENTRICITY -* GRAPH_BETWEENNESS -* GRAPH_CLOSENESS -* GRAPH_DIAMETER -* GRAPH_ECCENTRICITY -* GRAPH_RADIUS - -are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingMeasurementsTo3.html) - -* EDGES -* NEIGHBORS -* PATHS -* TRAVERSAL -* TRAVERSAL_TREE - -are covered in [Migrating anonymous graph functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingEdgeFunctionsTo3.html) - -### Typecasting functions - -The type casting applied by the `TO_NUMBER()` AQL function has changed as follows: -- string values that do not contain a valid numeric value are now converted to the number - `0`. In previous versions of ArangoDB such string values were converted to the value - `null`. -- array values with more than 1 member are now converted to the number `0`. In previous - versions of ArangoDB such arrays were converted to the value `null`. -- objects / documents are now converted to the number `0`. In previous versions of ArangoDB - objects / documents were converted to the value `null`. - -Additionally, the `TO_STRING()` AQL function now converts `null` values into an empty string -(`""`) instead of the string `"null"`, which is more in line with `LENGTH(null)` returning -`0` and not `4` since v2.6. - -The output of `TO_STRING()` has also changed for arrays and objects as follows: - -- arrays are now converted into their JSON-stringify equivalents, e.g. - - - `[ ]` is now converted to `[]` - - `[ 1, 2, 3 ]` is now converted to `[1,2,3]` - - `[ "test", 1, 2 ] is now converted to `["test",1,2]` - - Previous versions of ArangoDB converted arrays with no members into the - empty string, and non-empty arrays into a comma-separated list of member - values, without the surrounding angular brackets. Additionally, string - array members were not enclosed in quotes in the result string: - - - `[ ]` was converted to `` - - `[ 1, 2, 3 ]` was converted to `1,2,3` - - `[ "test", 1, 2 ] was converted to `test,1,2` - -- objects are now converted to their JSON-stringify equivalents, e.g. - - - `{ }` is converted to `{}` - - `{ a: 1, b: 2 }` is converted to `{"a":1,"b":2}` - - `{ "test" : "foobar" }` is converted to `{"test":"foobar"}` - - Previous versions of ArangoDB always converted objects into the string - `[object Object]` - -This change also affects other parts in AQL that used `TO_STRING()` to implicitly -cast operands to strings. It also affects the AQL functions `CONCAT()` and -`CONCAT_SEPARATOR()` which treated array values differently. Previous versions -of ArangoDB automatically flattened array values in the first level of the array, -e.g. `CONCAT([1, 2, 3, [ 4, 5, 6 ]])` produced `1,2,3,4,5,6`. Now this will produce -`[1,2,3,[4,5,6]]`. To flatten array members on the top level, you can now use -the more explicit `CONCAT(FLATTEN([1, 2, 3, [4, 5, 6]], 1))`. - -### Arithmetic operators - -As the arithmetic operations in AQL implicitly convert their operands to numeric values using -`TO_NUMBER()`, their casting behavior has also changed as described above. 
-
-Some examples of the changed behavior:
-
-- `"foo" + 1` produces `1` now. In previous versions this produced `null`.
-- `[ 1, 2 ] + 1` produces `1`. In previous versions this produced `null`.
-- `1 + "foo" + 1` produces `2` now. In previous versions this produced `1`.
-
-### Attribute names and parameters
-
-Previous versions of ArangoDB had some trouble with attribute names that contained the dot
-symbol (`.`). Some code parts in AQL used the dot symbol to split an attribute name into
-sub-components, so an attribute named `a.b` was not completely distinguishable from an
-attribute `a` with a sub-attribute `b`. This inconsistent behavior sometimes allowed "hacks"
-to work such as passing sub-attributes in a bind parameter as follows:
-
-```
-FOR doc IN collection
-  FILTER doc.@name == 1
-  RETURN doc
-```
-
-If the bind parameter `@name` contained the dot symbol (e.g. `@name` = `a.b`), it was unclear
-whether this should trigger sub-attribute access (i.e. `doc.a.b`) or an access to an attribute
-with exactly the specified name (i.e. `doc["a.b"]`).
-
-ArangoDB 3.0 now handles attribute names containing the dot symbol properly, and sending a
-bind parameter `@name` = `a.b` will now always trigger an access to the attribute `doc["a.b"]`,
-not the sub-attribute `b` of `a` in `doc`.
-
-For users that used the "hack" of passing bind parameters containing the dot symbol to access
-sub-attributes, ArangoDB 3.0 allows specifying the attribute name parts as an array of strings,
-e.g. `@name` = `[ "a", "b" ]`, which will be resolved to the sub-attribute access `doc.a.b`
-when the query is executed.
-
-### Keywords
-
-`LIKE` is now a keyword in AQL. Using `LIKE` (in any letter case) as an attribute or collection
-name in AQL queries now requires quoting.
-
-`SHORTEST_PATH` is now a keyword in AQL. Using `SHORTEST_PATH` (in any letter case) as an
-attribute or collection name in AQL queries now requires quoting.
-
-### Subqueries
-
-Queries that contain subqueries with data-modification operations such as `INSERT`,
-`UPDATE`, `REPLACE`, `UPSERT` or `REMOVE` will now refuse to execute if the collection
-affected by the subquery's data-modification operation is read-accessed in an outer scope
-of the query.
-
-For example, the following query will refuse to execute as the collection `myCollection`
-is modified in the subquery but also read-accessed in the outer scope:
-
-```
-FOR doc IN myCollection
-  LET changes = (
-    FOR what IN myCollection
-      FILTER what.value == 1
-      REMOVE what IN myCollection
-  )
-  RETURN doc
-```
-
-It is still possible to write to collections from which data is read in the same query,
-e.g.
-
-```
-FOR doc IN myCollection
-  FILTER doc.value == 1
-  REMOVE doc IN myCollection
-```
-
-and to modify data in a different collection via subqueries.
-
-### Other changes
-
-The AQL optimizer rule "merge-traversal-filter" that already existed in 3.0 was renamed to
-"optimize-traversals". This should be of no relevance to client applications except if
-they programmatically look for applied optimizer rules in the explain output of AQL queries.
-
-The order of results created by the AQL functions `VALUES()` and `ATTRIBUTES()` was never
-guaranteed, and it only had the "correct" ordering by accident when iterating over objects
-that were not loaded from the database. As some of the function internals have changed, the
-"correct" ordering will not appear anymore. Still, no result order is guaranteed by these
-functions unless the `sort` parameter is specified (for the `ATTRIBUTES()` function).
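A small arangosh sketch of requesting deterministic ordering explicitly via the `sort` parameter of `ATTRIBUTES()` (the second parameter controls removal of internal attributes):

```js
// third parameter (sort) set to true returns the attribute names alphabetically
db._query('RETURN ATTRIBUTES({ b: 2, a: 1 }, false, true)').toArray();
// expected result: [ [ "a", "b" ] ]
```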
-
-Upgraded V8 version
--------------------
-
-The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from
-version 4.3.61 to 5.0.71.39. The new version should be mostly compatible with the old version,
-but there may be subtle differences, including changes of error message texts thrown by the
-engine.
-Furthermore, some V8 startup parameters have changed their meaning or have been removed in
-the new version. This is only relevant when ArangoDB or the ArangoShell is started with a custom
-value for the `--javascript.v8-options` startup option.
-
-Among others, the following V8 options change in the new version of ArangoDB:
-
-- `--es_staging`: in 2.8 it had the meaning `enable all completed harmony features`, in 3.0
-  the option means `enable test-worthy harmony features (for internal use only)`
-
-- `--strong_this`: this option wasn't present in 2.8. In 3.0 it means `don't allow 'this' to
-  escape from constructors` and defaults to true.
-
-- `--harmony_regexps`: this option means `enable "harmony regular expression extensions"`
-  and changes its default value from false to true
-
-- `--harmony_proxies`: this option means `enable "harmony proxies"` and changes its default
-  value from false to true
-
-- `--harmony_reflect`: this option means `enable "harmony Reflect API"` and changes its
-  default value from false to true
-
-- `--harmony_sloppy`: this option means `enable "harmony features in sloppy mode"` and
-  changes its default value from false to true
-
-- `--harmony_tostring`: this option means `enable "harmony toString"` and changes its
-  default value from false to true
-
-- `--harmony_unicode_regexps`: this option means `enable "harmony unicode regexps"` and
-  changes its default value from false to true
-
-- `--harmony_arrays`, `--harmony_array_includes`, `--harmony_computed_property_names`,
-  `--harmony_arrow_functions`, `--harmony_rest_parameters`, `--harmony_classes`,
-  `--harmony_object_literals`, `--harmony_numeric_literals`, `--harmony_unicode`:
-  these options have been removed in V8 5.
-
-As a consequence of the upgrade to V8 version 5, the implementation of the
-JavaScript `Buffer` object had to be changed. JavaScript `Buffer` objects in
-ArangoDB now always store their data on the heap. There is no shared pool
-for small Buffer values, and no pointing into existing Buffer data when
-extracting slices. This change may increase the cost of creating Buffers with
-short contents or when peeking into existing Buffers, but was required for
-safer memory management and to prevent leaks.
-
-JavaScript API changes
-----------------------
-
-The following incompatible changes have been made to the JavaScript API in ArangoDB 3.0:
-
-### Foxx
-
-The Foxx framework has been completely rewritten for 3.0 with a new, simpler and more
-familiar API. To make Foxx services developed for 2.8 or earlier ArangoDB versions run in
-3.0, the service's manifest file needs to be edited.
-
-To enable the legacy mode for a Foxx service, add `"engines": {"arangodb": "^2.8.0"}`
-(or similar version ranges that exclude 3.0 and up) to the service manifest file
-(named "manifest.json", located in the service's base directory).
-
-### Require
-
-Modules shipped with ArangoDB can now be required using the pattern `@arangodb/`
-instead of `org/arangodb/`, e.g.
- -```js -var cluster = require("@arangodb/cluster"); -``` - -The old format can still be used for compatibility: - -```js -var cluster = require("org/arangodb/cluster"); -``` - -ArangoDB prior to version 3.0 allowed a transparent use of CoffeeScript -source files with the `require()` function. Files with a file name extension -of `coffee` were automatically sent through a CoffeeScript parser and -transpiled into JavaScript on-the-fly. This support is gone with ArangoDB -3.0. To run any CoffeeScript source files, they must be converted to JavaScript -by the client application. - -### Response object - -The `@arangodb/request` response object now stores the parsed JSON response -body in a property `json` instead of `body` when the request was made using the -`json` option. The `body` instead contains the response body as a string. - -### JavaScript Edges API - -When completely replacing an edge via a collection's `replace()` function the replacing -edge data now needs to contain the `_from` and `_to` attributes for the new edge. Previous -versions of ArangoDB did not require the edge data to contain `_from` and `_to` attributes -when replacing an edge, since `_from` and `_to` values were immutable for existing edges. - -For example, the following call worked in ArangoDB 2.8 but will fail in 3.0: - -```js -db.edgeCollection.replace("myKey", { value: "test" }); -``` - -To make this work in ArangoDB 3.0, `_from` and `_to` need to be added to the replacement -data: - -```js -db.edgeCollection.replace("myKey", { _from: "myVertexCollection/1", _to: "myVertexCollection/2", value: "test" }); -``` - -Note that this only affects the `replace()` function but not `update()`, which will -only update the specified attributes of the edge and leave all others intact. - -Additionally, the functions `edges()`, `outEdges()` and `inEdges()` with an array of edge -ids will now make the edge ids unique before returning the connected edges. This is probably -desired anyway, as results will be returned only once per distinct input edge id. However, -it may break client applications that rely on the old behavior. - -### Databases API - -The `_listDatabases()` function of the `db` object has been renamed to `_databases()`, making it -consistent with the `_collections()` function. Also the `_listEndpoints()` function has been -renamed to `_endpoints()`. - -### Collection API - -#### Example matching - -The collection function `byExampleHash()` and `byExampleSkiplist()` have been removed in 3.0. -Their functionality is provided by collection's `byExample()` function, which will automatically -use a suitable index if present. - -The collection function `byConditionSkiplist()` has been removed in 3.0. The same functionality -can be achieved by issuing an AQL query with the target condition, which will automatically use -a suitable index if present. - -#### Javascript Revision id handling - -The `exists()` method of a collection now throws an exception when the specified document -exists but its revision id does not match the revision id specified. Previous versions of -ArangoDB simply returned `false` if either no document existed with the specified key or -when the revision id did not match. It was therefore impossible to distinguish these two -cases from the return value alone. 3.0 corrects this. Additionally, `exists()` in previous -versions always returned a boolean if only the document key was given. 3.0 now returns the -document's meta-data, which includes the document's current revision id. 
- -Given there is a document with key `test` in collection `myCollection`, then the behavior -of 3.0 is as follows: - -```js -/* test if document exists. this returned true in 2.8 */ -db.myCollection.exists("test"); -{ - "_key" : "test", - "_id" : "myCollection/test", - "_rev" : "9758059" -} - -/* test if document exists. this returned true in 2.8 */ -db.myCollection.exists({ _key: "test" }); -{ - "_key" : "test", - "_id" : "myCollection/test", - "_rev" : "9758059" -} - -/* test if document exists. this also returned false in 2.8 */ -db.myCollection.exists("foo"); -false - -/* test if document with a given revision id exists. this returned true in 2.8 */ -db.myCollection.exists({ _key: "test", _rev: "9758059" }); -{ - "_key" : "test", - "_id" : "myCollection/test", - "_rev" : "9758059" -} - -/* test if document with a given revision id exists. this returned false in 2.8 */ -db.myCollection.exists({ _key: "test", _rev: "1234" }); -JavaScript exception: ArangoError 1200: conflict -``` - -#### Cap constraints - -The cap constraints feature has been removed. This change has led to the removal of the -collection operations `first()` and `last()`, which were internally based on data from -cap constraints. - -As cap constraints have been removed in ArangoDB 3.0 it is not possible to create an -index of type "cap" with a collection's `ensureIndex()` function. The dedicated function -`ensureCapConstraint()` has also been removed from the collection API. - -#### Graph Blueprints JS Module - -The deprecated module `graph-blueprints` has been deleted. -All it's features are covered by the `general-graph` module. - -#### General Graph Fluent AQL interface - -The fluent interface has been removed from ArangoDB. -It's features were completely overlapping with ["aqb"](https://github.com/arangodb/aqbjs) -which comes pre installed as well. -Please switch to AQB instead. - -#### Undocumented APIs - -The undocumented functions `BY_EXAMPLE_HASH()` and `BY_EXAMPLE_SKIPLIST()`, -`BY_CONDITION_SKIPLIST`, `CPP_NEIGHBORS` and `CPP_SHORTEST_PATH` have been removed. -These functions were always hidden and not intended to be part of -the public JavaScript API for collections. - - -HTTP API changes ----------------- - -### CRUD operations - -The following incompatible changes have been made to the HTTP API in ArangoDB 3.0: - -#### General - -The HTTP insert operations for single documents and edges (POST `/_api/document`) do -not support the URL parameter "createCollection" anymore. In previous versions of -ArangoDB this parameter could be used to automatically create a collection upon -insertion of the first document. It is now required that the target collection already -exists when using this API, otherwise it will return an HTTP 404 error. -The same is true for the import API at POST `/_api/import`. - -Collections can still be created easily via a separate call to POST `/_api/collection` -as before. - -The "location" HTTP header returned by ArangoDB when inserting a new document or edge -now always contains the database name. This was also the default behavior in previous -versions of ArangoDB, but it could be overridden by clients sending the HTTP header -`x-arango-version: 1.4` in the request. Clients can continue to send this header to -ArangoDB 3.0, but the header will not influence the location response headers produced -by ArangoDB 3.0 anymore. - -Additionally the CRUD operations APIs do not return an attribute "error" in the -response body with an attribute value of "false" in case an operation succeeded. 
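-
-For the collection auto-creation change described above, the separate create-then-insert
-flow looks like the following arangosh sketch (hedged; `myCollection` is just an example
-name):
-
-```js
-// equivalent of POST /_api/collection: create the target collection explicitly
-if (db._collection("myCollection") === null) {
-  db._create("myCollection");
-}
-// equivalent of POST /_api/document?collection=myCollection: the insert now succeeds
-db.myCollection.insert({ value: 1 });
-```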
- -#### Revision id handling - -The operations for updating, replacing and removing documents can optionally check the -revision number of the document to be updated, replaced or removed so the caller can -ensure the operation works on a specific version of the document and there are no -lost updates. - -Previous versions of ArangoDB allowed passing the revision id of the previous document -either in the HTTP header `If-Match` or in the URL parameter `rev`. For example, -removing a document with a specific revision id could be achieved as follows: - -``` -curl -X DELETE \ - "http://127.0.0.1:8529/_api/document/myCollection/myKey?rev=123" -``` - -ArangoDB 3.0 does not support passing the revision id via the "rev" URL parameter -anymore. Instead the previous revision id must be passed in the HTTP header `If-Match`, -e.g. - -``` -curl -X DELETE \ - --header "If-Match: '123'" \ - "http://127.0.0.1:8529/_api/document/myCollection/myKey" -``` - -The URL parameter "policy" was also usable in previous versions of ArangoDB to -control revision handling. Using it was redundant to specifying the expected revision -id via the "rev" parameter or "If-Match" HTTP header and therefore support for the "policy" -parameter was removed in 3.0. - -In order to check for a previous revision id when updating, replacing or removing -documents please use the `If-Match` HTTP header as described above. When no revision -check if required the HTTP header can be omitted, and the operations will work on the -current revision of the document, regardless of its revision id. - -### All documents API - -The HTTP API for retrieving the ids, keys or URLs of all documents from a collection -was previously located at GET `/_api/document?collection=...`. This API was moved to -PUT `/_api/simple/all-keys` and is now executed as an AQL query. -The name of the collection must now be passed in the HTTP request body instead of in -the request URL. The same is true for the "type" parameter, which controls the type of -the result to be created. - -Calls to the previous API can be translated as follows: - -- old: GET `/_api/document?collection=&type=` without HTTP request body -- 3.0: PUT `/_api/simple/all-keys` with HTTP request body `{"collection":"","type":"id"}` - -The result format of this API has also changed slightly. In previous versions calls to -the API returned a JSON object with a `documents` attribute. As the functionality is -based on AQL internally in 3.0, the API now returns a JSON object with a `result` attribute. - -### Edges API - -#### CRUD operations on edges - -The API for documents and edges have been unified in ArangoDB 3.0. The CRUD operations -for documents and edges are now handled by the same endpoint at `/_api/document`. For -CRUD operations there is no distinction anymore between documents and edges API-wise. - -That means CRUD operations concerning edges need to be sent to the HTTP endpoint -`/_api/document` instead of `/_api/edge`. Sending requests to `/_api/edge` will -result in an HTTP 404 error in 3.0. The following methods are available at -`/_api/document` for documents and edge: - -- HTTP POST: insert new document or edge -- HTTP GET: fetch an existing document or edge -- HTTP PUT: replace an existing document or edge -- HTTP PATCH: partially update an existing document or edge -- HTTP DELETE: remove an existing document or edge - -When completely replacing an edge via HTTP PUT please note that the replacing edge -data now needs to contain the `_from` and `_to` attributes for the edge. 
Previous
-versions of ArangoDB did not require sending `_from` and `_to` when replacing edges,
-as `_from` and `_to` values were immutable for existing edges.
-
-The `_from` and `_to` attributes of edges now also need to be present inside the
-edge objects sent to the server:
-
-```
-curl -X POST \
-  --data '{"value":1,"_from":"myVertexCollection/1","_to":"myVertexCollection/2"}' \
-  "http://127.0.0.1:8529/_api/document?collection=myEdgeCollection"
-```
-
-Previous versions of ArangoDB required the `_from` and `_to` attributes of edges to be
-sent separately in the URL parameters `from` and `to`:
-
-```
-curl -X POST \
-  --data '{"value":1}' \
-  "http://127.0.0.1:8529/_api/edge?collection=e&from=myVertexCollection/1&to=myVertexCollection/2"
-```
-
-#### Querying connected edges
-
-The REST API for querying connected edges at GET `/_api/edges/` will now
-make the edge ids unique before returning the connected edges. This is probably desired anyway
-as results will now be returned only once per distinct input edge id. However, it may break
-client applications that rely on the old behavior.
-
-#### Graph API
-
-Some data-modification operations in the named graphs API at `/_api/gharial` now return either
-HTTP 202 (Accepted) or HTTP 201 (Created) if the operation succeeds. Which status code is returned
-depends on the `waitForSync` attribute of the affected collection. In previous versions some
-of these operations returned HTTP 200 regardless of the `waitForSync` value.
-
-The deprecated graph API `/_api/graph` has been removed.
-All its features can be replaced using `/_api/gharial` and AQL instead.
-
-### Simple queries API
-
-The REST routes PUT `/_api/simple/first` and `/_api/simple/last` have been removed
-entirely. These APIs were responsible for returning the first-inserted and
-last-inserted documents in a collection. This feature was built on cap constraints
-internally, which have been removed in 3.0.
-
-Calling one of these endpoints in 3.0 will result in an HTTP 404 error.
-
-### Indexes API
-
-It is not supported in 3.0 to create an index with type `cap` (cap constraint), as the
-cap constraints feature has been removed. Calling the index creation HTTP API endpoint
-POST `/_api/index?collection=...` with an index type `cap` will therefore result in an
-HTTP 400 error.
-
-### Log entries API
-
-The REST route HTTP GET `/_admin/log` is now accessible from within all databases. In
-previous versions of ArangoDB, this route was accessible from within the `_system`
-database only, and an HTTP 403 (Forbidden) was thrown by the server for any access
-from within another database.
-
-### Figures API
-
-The REST route HTTP GET `/_api/collection//figures` will not return the
-following result attributes as they became meaningless in 3.0:
-
-- shapefiles.count
-- shapes.fileSize
-- shapes.count
-- shapes.size
-- attributes.count
-- attributes.size
-
-### Databases and Collections APIs
-
-When creating a database via the API POST `/_api/database`, ArangoDB will now always
-return the HTTP status code 202 (created) if the operation succeeds. Previous versions
-of ArangoDB returned HTTP 202 as well, but this behavior was changeable by sending an
-HTTP header `x-arango-version: 1.4`. When sending this header, previous versions of
-ArangoDB returned an HTTP status code 200 (ok). Clients can still send this header to
-ArangoDB 3.0 but this will not influence the HTTP status code produced by ArangoDB.
-
-The "location" header produced by ArangoDB 3.0 will now always contain the database
-name.
This was also the default in previous versions of ArangoDB, but the behavior -could be overridden by sending the HTTP header `x-arango-version: 1.4`. Clients can -still send the header, but this will not make the database name in the "location" -response header disappear. - -The result format for querying all collections via the API GET `/_api/collection` -has been changed. - -Previous versions of ArangoDB returned an object with an attribute named `collections` -and an attribute named `names`. Both contained all available collections, but -`collections` contained the collections as an array, and `names` contained the -collections again, contained in an object in which the attribute names were the -collection names, e.g. - -``` -{ - "collections": [ - {"id":"5874437","name":"test","isSystem":false,"status":3,"type":2}, - {"id":"17343237","name":"something","isSystem":false,"status":3,"type":2}, - ... - ], - "names": { - "test": {"id":"5874437","name":"test","isSystem":false,"status":3,"type":2}, - "something": {"id":"17343237","name":"something","isSystem":false,"status":3,"type":2}, - ... - } -} -``` -This result structure was redundant, and therefore has been simplified to just - -``` -{ - "result": [ - {"id":"5874437","name":"test","isSystem":false,"status":3,"type":2}, - {"id":"17343237","name":"something","isSystem":false,"status":3,"type":2}, - ... - ] -} -``` - -in ArangoDB 3.0. - -### Replication APIs - -The URL parameter "failOnUnknown" was removed from the REST API GET `/_api/replication/dump`. -This parameter controlled whether dumping or replicating edges should fail if one -of the vertex collections linked in the edge's `_from` or `_to` attributes was not -present anymore. In this case the `_from` and `_to` values could not be translated into -meaningful ids anymore. - -There were two ways for handling this: -- setting `failOnUnknown` to `true` caused the HTTP request to fail, leaving error - handling to the user -- setting `failOnUnknown` to `false` caused the HTTP request to continue, translating - the collection name part in the `_from` or `_to` value to `_unknown`. - -In ArangoDB 3.0 this parameter is obsolete, as `_from` and `_to` are stored as self-contained -string values all the time, so they cannot get invalid when referenced collections are -dropped. - -The result format of the API GET `/_api/replication/logger-follow` has changed slightly in -the following aspects: - -- documents and edges are reported in the same way. The type for document insertions/updates - and edge insertions/updates is now always `2300`. Previous versions of ArangoDB returned - a `type` value of `2300` for documents and `2301` for edges. -- records about insertions, updates or removals of documents and edges do not have the - `key` and `rev` attributes on the top-level anymore. Instead, `key` and `rev` can be - accessed by peeking into the `_key` and `_rev` attributes of the `data` sub-attributes - of the change record. - -The same is true for the collection-specific changes API GET `/_api/replication/dump`. - -### User management APIs - -The REST API endpoint POST `/_api/user` for adding new users now requires the request to -contain a JSON object with an attribute named `user`, containing the name of the user to -be created. Previous versions of ArangoDB also checked this attribute, but additionally -looked for an attribute `username` if the `user` attribute did not exist. 
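-
-A hedged sketch of the request body this endpoint now expects, together with a roughly
-equivalent call through the bundled users module (user name and password are made up):
-
-```js
-// body for POST /_api/user -- note the attribute is named "user", not "username"
-var body = { user: "myUser", passwd: "mySecret", active: true };
-
-// roughly the same operation via the users module
-var users = require("@arangodb/users");
-users.save("myUser", "mySecret");
-```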
- -### Undocumented HTTP APIs - -The following undocumented HTTP REST endpoints have been removed from ArangoDB's REST -API: - -- `/_open/cerberus` and `/_system/cerberus`: these endpoints were intended for some - ArangoDB-internal applications only -- PUT `/_api/simple/by-example-hash`, PUT `/_api/simple/by-example-skiplist` and - PUT `/_api/simple/by-condition-skiplist`: these methods were documented in early - versions of ArangoDB but have been marked as not intended to be called by end - users since ArangoDB version 2.3. These methods should not have been part of any - ArangoDB manual since version 2.4. -- `/_api/structure`: an older unfinished and unpromoted API for data format and type - checks, superseded by Foxx applications. - -### Administration APIs - -- `/_admin/shutdown` now needs to be called with the HTTP DELETE method - -### Handling of CORS requests - -It can now be controlled in detail for which origin hosts CORS (Cross-origin resource -sharing) requests with credentials will be allowed. ArangoDB 3.0 provides the startup -option `--http.trusted-origin` that can be used to specify one or many origins from -which CORS requests are treated as "trustworthy". - -The option can be specified multiple times, once per trusted origin, e.g. - -``` ---http.trusted-origin http://127.0.0.1:8529 --http.trusted-origin https://127.0.0.1:8599 -``` - -This will make the ArangoDB server respond to CORS requests from these origins with an -`Access-Control-Allow-Credentials` HTTP header with a value of `true`. Web browsers can -inspect this header and can allow passing ArangoDB web interface credentials (if stored -in the browser) to the requesting site. ArangoDB will not forward or provide any credentials. - -Setting this option is only required if applications on other hosts need to access the -ArangoDB web interface or other HTTP REST APIs from a web browser with the same credentials -that the user has entered when logging into the web interface. When a web browser finds -the `Access-Control-Allow-Credentials` HTTP response header, it may forward the credentials -entered into the browser for the ArangoDB web interface login to the other site. - -This is a potential security issue, so there are no trusted origins by default. It may -be required to set some trusted origins if you're planning to issue AJAX requests to ArangoDB -from other sites from the browser, with the credentials entered during the ArangoDB interface -login (i.e. single sign-on). If such functionality is not used, the option should not -be set. - -To specify a trusted origin, specify the option once per trusted origin as shown above. -Note that the trusted origin values specified in this option will be compared bytewise -with the `Origin` HTTP header value sent by clients, and only exact matches will pass. - -There is also the wildcard `all` for enabling CORS access from all origins in a -test or development setup: - -``` ---http.trusted-origin all -``` - -Setting this option will lead to the ArangoDB server responding with an -`Access-Control-Allow-Credentials: true` HTTP header to all incoming CORS requests. - -Command-line options --------------------- - -Quite a few startup options in ArangoDB 2 were double negations (like -`--server.disable-authentication false`). In ArangoDB 3 these are now expressed as -positives (e. g. `--server.authentication`). Also the options between the ArangoDB -server and its client tools have being unified. 
For example, the logger options are -now the same for the server and the client tools. Additionally many options have -been moved into more appropriate topic sections. - -### Renamed options - -The following options have been available before 3.0 and have changed their name -in 3.0: - -- `--server.disable-authentication` was renamed to `--server.authentication`. - Note that the meaning of the option `--server.authentication` is the opposite of - the previous `--server.disable-authentication`. -- `--server.disable-authentication-unix-sockets` was renamed to - `--server.authentication-unix-sockets`. Note that the meaning of the option - `--server.authentication-unix-sockets` is the opposite of the previous - `--server.disable-authentication-unix-sockets`. -- `--server.authenticate-system-only` was renamed to `--server.authentication-system-only`. - The meaning of the option in unchanged. -- `--server.disable-statistics` was renamed to `--server.statistics`. Note that the - meaning of the option `--server.statistics` is the opposite of the previous - `--server.disable-statistics`. -- `--server.cafile` was renamed to `--ssl.cafile`. The meaning of the option is - unchanged. -- `--server.keyfile` was renamed to `--ssl.keyfile`. The meaning of the option is - unchanged. -- `--server.ssl-cache` was renamed to `--ssl.session-cache`. The meaning of the option - is unchanged. -- `--server.ssl-cipher-list` was renamed to `--ssl.cipher-list`. The meaning of the - option is unchanged. -- `--server.ssl-options` was renamed to `--ssl.options`. The meaning of the option - is unchanged. -- `--server.ssl-protocol` was renamed to `--ssl.protocol`. The meaning of the option - is unchanged. -- `--server.backlog-size` was renamed to `--tcp.backlog-size`. The meaning of the - option is unchanged. -- `--server.reuse-address` was renamed to `--tcp.reuse-address`. The meaning of the - option is unchanged. -- `--server.disable-replication-applier` was renamed to `--database.replication-applier`. - The meaning of the option `--database.replication-applier` is the opposite of the - previous `--server.disable-replication-applier`. -- `--server.allow-method-override` was renamed to `--http.allow-method-override`. The - meaning of the option is unchanged. -- `--server.hide-product-header` was renamed to `--http.hide-product-header`. The - meaning of the option is unchanged. -- `--server.keep-alive-timeout` was renamed to `--http.keep-alive-timeout`. The - meaning of the option is unchanged. -- `--server.foxx-queues` was renamed to `--foxx.queues`. The meaning of the option - is unchanged. -- `--server.foxx-queues-poll-interval` was renamed to `--foxx.queues-poll-interval`. - The meaning of the option is unchanged. -- `--no-server` was renamed to `--server.rest-server`. Note that the meaning of the - option `--server.rest-server` is the opposite of the previous `--no-server`. -- `--database.query-cache-mode` was renamed to `--query.cache-mode`. The meaning of - the option is unchanged. -- `--database.query-cache-max-results` was renamed to `--query.cache-entries`. The - meaning of the option is unchanged. -- `--database.disable-query-tracking` was renamed to `--query.tracking`. The meaning - of the option `--query.tracking` is the opposite of the previous - `--database.disable-query-tracking`. -- `--log.tty` was renamed to `--log.foreground-tty`. The meaning of the option is - unchanged. -- `--upgrade` has been renamed to `--database.auto-upgrade`. In contrast to 2.8 this - option now requires a boolean parameter. 
To actually perform an automatic database - upgrade at startup use `--database.auto-upgrade true`. To not perform it, use - `--database.auto-upgrade false`. -- `--check-version` has been renamed to `--database.check-version`. -- `--temp-path` has been renamed to `--temp.path`. - -### Log verbosity, topics and output files - -Logging now supports log topics. You can control these by specifying a log -topic in front of a log level or an output. For example - -``` - --log.level startup=trace --log.level info -``` - -will log messages concerning startup at trace level, everything else at info -level. `--log.level` can be specified multiple times at startup, for as many -topics as needed. - -Some relevant log topics available in 3.0 are: - -- *collector*: information about the WAL collector's state -- *compactor*: information about the collection datafile compactor -- *datafiles*: datafile-related operations -- *mmap*: information about memory-mapping operations -- *performance*: some performance-related information -- *queries*: executed AQL queries -- *replication*: replication-related info -- *requests*: HTTP requests -- *startup*: information about server startup and shutdown -- *threads*: information about threads - -The new log option `--log.output ` allows directing the global -or per-topic log output to different outputs. The output definition "" -can be one of - -- "-" for stdin -- "+" for stderr -- "syslog://" -- "syslog:///" -- "file://" - -The option can be specified multiple times in order to configure the output -for different log topics. To set up a per-topic output configuration, use -`--log.output =`, e.g. - - queries=file://queries.txt - -logs all queries to the file "queries.txt". - -The old option `--log.file` is still available in 3.0 for convenience reasons. In -3.0 it is a shortcut for the more general option `--log.output file://filename`. - -The old option `--log.requests-file` is still available in 3.0. It is now a shortcut -for the more general option `--log.output requests=file://...`. - -The old option `--log.performance` is still available in 3.0. It is now a shortcut -for the more general option `--log.level performance=trace`. - -### Removed options for logging - -The options `--log.content-filter` and `--log.source-filter` have been removed. They -have most been used during ArangoDB's internal development. - -The syslog-related options `--log.application` and `--log.facility` have been removed. -They are superseded by the more general `--log.output` option which can also handle -syslog targets. - -### Removed other options - -The option `--server.default-api-compatibility` was present in earlier version of -ArangoDB to control various aspects of the server behavior, e.g. HTTP return codes -or the format of HTTP "location" headers. Client applications could send an HTTP -header "x-arango-version" with a version number to request the server behavior of -a certain ArangoDB version. - -This option was only honored in a handful of cases (described above) and was removed -in 3.0 because the changes in server behavior controlled by this option were changed -even before ArangoDB 2.0. This should have left enough time for client applications -to adapt to the new behavior, making the option superfluous in 3.0. - -### Thread options - -The options `--server.threads` and `--scheduler.threads` now have a default value of -`0`. 
When `--server.threads` is set to `0` on startup, the suitable number of -threads will be determined by ArangoDB by asking the OS for the number of available -CPUs and using that as a baseline. If the number of CPUs is lower than 4, ArangoDB -will still start 4 dispatcher threads. When `--scheduler.threads` is set to `0`, -then ArangoDB will automatically determine the number of scheduler threads to start. -This will normally create 2 scheduler threads. - -If the exact number of threads needs to be set by the admin, then it is still possible -to set `--server.threads` and `--scheduler.threads` to non-zero values. ArangoDB will -use these values and start that many threads (note that some threads may be created -lazily so they may not be present directly after startup). - -The number of V8 JavaScript contexts to be created (`--javascript.v8-contexts`) now -has a default value of `0` too, meaning that ArangoDB will create as many V8 contexts -as there will be dispatcher threads (controlled by the `--server.threads` option). -Setting this option to a non-zero value will create exactly as many V8 contexts as -specified. - -Setting these options explicitly to non-zero values may be beneficial in environments -that have few resources (processing time, maximum thread count, available memory). - -Authentication --------------- - -The default value for `--server.authentication` is now `true` in the configuration -files shipped with ArangoDB. This means the server will be started with authentication -enabled by default, requiring all client connections to provide authentication data -when connecting to ArangoDB APIs. Previous ArangoDB versions used the setting -`--server.disable-authentication true`, effectively disabling authentication by default. - -The default value for `--server.authentication-system-only` is now `true` in ArangoDB. -That means that Foxx applications running in ArangoDB will be public accessible (at -least they will not use ArangoDB's builtin authentication mechanism). Only requests to -ArangoDB APIs at URL path prefixes `/_api/` and `/_admin` will require authentication. -To change that, and use the builtin authentication mechanism for Foxx applications too, -set `--server.authentication-system-only` to `false`, and make sure to have the option -`--server.authentication` set to `true` as well. - -Though enabling the authentication is recommended for production setups, it may be -overkill in a development environment. To turn off authentication, the option -`--server.authentication` can be set to `false` in ArangoDB's configuration file or -on the command-line. - -Web Admin Interface -------------------- - -The JavaScript shell has been removed from ArangoDB's web interface. The functionality -the shell provided is still fully available in the ArangoShell (arangosh) binary shipped -with ArangoDB. - -ArangoShell and client tools ----------------------------- - -The ArangoShell (arangosh) and the other client tools bundled with ArangoDB can only -connect to an ArangoDB server of version 3.0 or higher. They will not connect to an -ArangoDB 2.8. This is because the server HTTP APIs have changed between 2.8 and 3.0, -and all client tools uses these APIs. - -In order to connect to earlier versions of ArangoDB with the client tools, an older -version of the client tools needs to be kept installed. - -The preferred name for the template string generator function `aqlQuery` is now -`aql` and is automatically available in arangosh. 
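-
-A minimal, hedged sketch of using it from arangosh (a `users` collection is assumed to exist):
-
-```js
-var name = "john";
-var q = aql`FOR u IN users FILTER u.name == ${name} RETURN u`;
-db._query(q).toArray();
-```
-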
Elsewhere, it can be loaded -like `const aql = require('@arangodb').aql`. - -### Command-line options added - -All client tools in 3.0 provide an option `--server.max-packet-size` for controlling -the maximum size of HTTP packets to be handled by the client tools. The default value -is 128 MB, as in previous versions of ArangoDB. In contrast to previous versions in -which the value was hard-coded, the option is now configurable. It can be increased to -make the client tools handle very large HTTP result messages sent by the server. - -### Command-line options changed - -For all client tools, the option `--server.disable-authentication` was renamed to -`--server.authentication`. Note that the meaning of the option `--server.authentication` -is the opposite of the previous `--server.disable-authentication`. - -The option `--server.ssl-protocol` was renamed to `--ssl.protocol`. The meaning of -the option is unchanged. - -The command-line option `--quiet` was removed from all client tools except arangosh -because it had no effect in them. - -### Arangobench - -In order to make its purpose more apparent the former `arangob` client tool has -been renamed to `arangobench` in 3.0. - -Miscellaneous changes ---------------------- - -The checksum calculation algorithm for the `collection.checksum()` method and its -corresponding REST API GET `/_api/collection/= 3.1 can ArangoDB 3.0 database directories and will simply continue -to use the old `_rev` attribute values. New revisions will be written with -the new time stamps. - -It is highly recommended to backup all your data before loading a database -directory that was written by ArangoDB <= 3.0 into an ArangoDB >= 3.1. - -To change all your old `_rev` attributes into new style time stamps you -have to use `arangodump` to dump all data out (using ArangoDB 3.0), and -use `arangorestore` into the new ArangoDB 3.1, which is the safest -way to upgrade. - -The change also affects the return format of `_rev` values and other revision -values in HTTP APIs (see below). - -HTTP API changes ----------------- - -### APIs added - -The following HTTP REST APIs have been added for online log level adjustment of -the server: - -* GET `/_admin/log/level` returns the current log level settings -* PUT `/_admin/log/level` modifies the current log level settings - -### APIs changed - -* the following REST APIs that return revision ids now make use of the new revision - id format introduced in 3.1. All revision ids returned will be strings as in 3.0, but - have a different internal format. - - The following APIs are affected: - - GET /_api/collection/{collection}/checksum: `revision` attribute - - GET /_api/collection/{collection}/revision: `revision` attribute - - all other APIs that return documents, which may include the documents' `_rev` attribute - - Client applications should not try to interpret the internals of revision values, but only - use revision values for checking whether two revision strings are identical. - -* the replication REST APIs will now use the attribute name `journalSize` instead of - `maximalSize` when returning information about collections. 
-
-* the default value for `keepNull` has been changed from `false` to `true` for
-  the following partial update operations for vertices and edges in /_api/gharial:
-
-  - PATCH /_api/gharial/{graph}/vertex/{collection}/{key}
-  - PATCH /_api/gharial/{graph}/edge/{collection}/{key}
-
-  The value for `keepNull` can still be set explicitly to `false` by setting the
-  URL parameter `keepNull` to a value of `false`.
-
-* the REST API for dropping collections (DELETE /_api/collection) now accepts an
-  optional query string parameter `isSystem`, which can be set to `true` in order to
-  drop system collections. If the parameter is not set or not set to `true`, the REST
-  API will refuse to drop system collections. In previous versions of ArangoDB, the
-  `isSystem` parameter did not exist, and there was no distinction between system
-  and non-system collections when dropping collections.
-
-* the REST API for retrieving AQL query results (POST /_api/cursor) will now return an
-  additional sub-attribute `loading collections` that will contain the total time
-  required for loading and locking collections during the AQL query when profiling is
-  enabled. The attribute can be found in the `extra` result attribute in sub-attribute
-  `loading collections`. The attribute will only be set if profiling was enabled for
-  the query.
-
-* the REST API for retrieving AQL query results (POST /_api/cursor) will now accept the optional attribute `memoryLimit`.
-
-Foxx Testing
-------------
-
-The QUnit interface to Mocha has been removed. This affects the behaviour of the `suite`, `test`, `before`, `after`, `beforeEach` and `afterEach` functions in Foxx test suites. The `suite` and `test` functions are now provided by the TDD interface. The `before`, `after`, `beforeEach` and `afterEach` functions are now provided by the BDD interface.
-
-This should not cause any problems with existing tests but may result in failures in test cases that previously passed for the wrong reasons. Specifically, the execution order of the `before`, `after`, etc. functions now follows the intended order and is no longer arbitrary.
-
-For details on the expected behaviour of these functions, see the [testing chapter](../Foxx/Guides/Testing.md) in the Foxx documentation.
diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges32.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges32.md
deleted file mode 100644
index 08ee7f3dbcae..000000000000
--- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges32.md
+++ /dev/null
@@ -1,147 +0,0 @@
-Incompatible changes in ArangoDB 3.2
-====================================
-
-It is recommended to check the following list of incompatible changes **before**
-upgrading to ArangoDB 3.2, and adjust any client programs if necessary.
-
-AQL
----
-
-* AQL breaking change in cluster:
-  The SHORTEST_PATH statement using edge-collection names instead
-  of a graph name now requires the involved vertex collections to be named explicitly
-  in the AQL query when running in a cluster. This can be done by adding `WITH `
-  at the beginning of the query.
-
-  Example:
-  ```
-  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
-  ```
-
-  Now has to be:
-
-  ```
-  WITH vertices
-  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
-  ```
-
-  This change was made to avoid deadlock situations in the cluster case.
-  If the vertex collections are not declared, an error message stating the above is returned.
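-
-  Issued from arangosh, the corrected query could look like this hedged sketch (it reuses
-  the collection names from the example above; the document keys are made up):
-
-  ```js
-  db._query(
-    `WITH vertices
-     FOR v, e IN OUTBOUND SHORTEST_PATH @start TO @target edges
-       RETURN v._key`,
-    { start: "vertices/a", target: "vertices/b" }
-  ).toArray();
-  ```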
-
-
-REST API
---------
-
-* Removed undocumented internal HTTP API:
-  * PUT /_api/edges
-
-  The documented GET /_api/edges and the undocumented POST /_api/edges remain unmodified.
-
-* changed undocumented behaviour in case of invalid revision ids in
-  `If-Match` and `If-None-Match` headers from returning HTTP status code 400 (bad request)
-  to returning HTTP status code 412 (precondition failed).
-
-* the REST API for fetching the list of currently running AQL queries and the REST API
-  for fetching the list of slow AQL queries now return an extra *bindVars* attribute which
-  contains the bind parameters used by the queries.
-
-  This affects the return values of the following API endpoints:
-  * GET /_api/query/current
-  * GET /_api/query/slow
-
-* The REST API for retrieving indexes (GET /_api/index) now returns the *deduplicate*
-  attribute for each index.
-
-* The REST API for creating indexes (POST /_api/index) now accepts the optional *deduplicate*
-  attribute.
-
-* The REST API for executing a server-side transaction (POST /_api/transaction) now accepts the optional attributes: `maxTransactionSize`, `intermediateCommitCount`, `intermediateCommitSize`
-
-* The REST API for creating a cursor (POST /_api/cursor) now accepts the optional attributes: `failOnWarning`, `maxTransactionSize`, `maxWarningCount`, `intermediateCommitCount`, `satelliteSyncWait`, `intermediateCommitSize`, `skipInaccessibleCollections`
-
-JavaScript API
---------------
-
-* changed undocumented behaviour in case of invalid revision ids in
-  JavaScript document operations from returning error code 1239 ("illegal document revision")
-  to returning error code 1200 ("conflict").
-
-* the `collection.getIndexes()` function now returns the *deduplicate* attribute for each index
-
-* the `collection.ensureIndex()` function now accepts the optional *deduplicate* attribute
-
-
-Foxx
-----
-
-* JWT tokens issued by the built-in [JWT session storage](../Foxx/Reference/Sessions/Storages/JWT.md) now correctly specify the `iat` and `exp` values in seconds rather than milliseconds, as specified in the JSON Web Token standard.
-
-  This may result in previously expired tokens using milliseconds being incorrectly accepted. For this reason it is recommended to replace the signing `secret` or set the new `maxExp` option to a reasonable value that is smaller than the oldest issued expiration timestamp.
-
-  For example, setting `maxExp` to `10**12` would invalidate all incorrectly issued tokens before 9 September 2001 without impairing new tokens until the year 33658 (at which point these tokens are hopefully no longer relevant).
-
-* ArangoDB running in standalone mode will commit all services in the `javascript.app-path` to the database on startup. This may result in uninstalled services showing up in ArangoDB if they were not properly removed from the filesystem.
-
-* ArangoDB coordinators in a cluster now perform a self-healing step during startup to ensure installed services are consistent across all coordinators. We recommend backing up your services and configuration before upgrading to ArangoDB 3.2, especially if you have made use of the development mode.
-
-* Services installed before upgrading to 3.2 (including services installed on alpha releases of ArangoDB 3.2) are **NOT** picked up by the coordinator self-healing watchdog.
This can be solved by either upgrading/replacing these services or by using the ["commit" route of the Foxx service management HTTP API](../../HTTP/Foxx/Miscellaneous.html), which commits the exact services installed on a given coordinator to the cluster. New services will be picked up automatically.
-
-* The format used by Foxx to store internal service metadata in the database has been simplified and existing documents will be updated to the new format. If you have made any changes to the data stored in the `_apps` system collection, you may wish to export these changes as they will be overwritten.
-
-* There is now an [official HTTP API for managing services](../../HTTP/Foxx/index.html). If you were previously using any of the undocumented APIs or the routes used by the administrative web interface, we highly recommend migrating to the new API. The old undocumented HTTP API for managing services is deprecated and will be removed in a future version of ArangoDB.
-
-* Although changes to the filesystem outside of development mode were already strongly discouraged, this is a reminder that they are no longer supported. All files generated by services (whether by a setup script or during normal operation such as uploads) should either be stored outside the service directory or be considered extremely volatile.
-
-* Introduced distinction between `arangoUser` and `authorized` in Foxx requests. Cluster-internal requests will never have an `arangoUser` but are authorized. In earlier versions of ArangoDB parts of the statistics were not accessible by the coordinators because the underlying Foxx service couldn't authorize the requests. It now correctly checks the new `req.authorized` property. `req.arangoUser` still works as before. End users may use this new property as well to easily check whether a request is authorized, regardless of a specific user.
-
-
-Command-line options changed
-----------------------------
-
-* `--server.maximal-queue-size` is now an absolute maximum. If the queue is
-  full, then 503 is returned. Setting it to 0 means "no limit". The default
-  value for this option is now `0`.
-
-* the default value for `--ssl.protocol` has been changed from `4` (TLSv1) to `5` (TLSv1.2).
-
-* the startup options `--database.revision-cache-chunk-size` and
-  `--database.revision-cache-target-size` are now obsolete and do nothing
-
-* the startup option `--database.index-threads` is now obsolete
-
-* the option `--javascript.v8-contexts` is now an absolute maximum. The server
-  may start fewer V8 contexts for JavaScript execution at startup. If at some
-  point the server needs more V8 contexts it may start them dynamically, until
-  the number of V8 contexts reaches the value of `--javascript.v8-contexts`.
-
-  The minimum number of V8 contexts to create at startup can be configured via
-  the new startup option `--javascript.v8-contexts-minimum`.
-
-* added command-line option `--javascript.allow-admin-execute`
-
-  This option can be used to control whether user-defined JavaScript code
-  is allowed to be executed on the server by sending it via HTTP to the API endpoint
-  `/_admin/execute` with an authenticated user account.
-  The default value is `false`, which disables the execution of user-defined
-  code. This is also the recommended setting for production. In test environments,
-  it may be convenient to turn the option on in order to send arbitrary setup
-  or teardown commands for execution on the server.
-
-  The introduction of this option changes the default behavior of ArangoDB 3.2:
-  3.2 now by default disables the execution of JavaScript code via this API,
-  whereas earlier versions allowed it. To restore the old behavior, it is
-  necessary to set the option to `true`.
-
-
-Users Management
-----------------------
-
-* It is no longer supported to access the `_users` collection in any way directly, except through the official `@arangodb/users` module or the `_api/user` REST API.
-
-* Access to the `_users` collection from outside of the arangod server process is now forbidden (through drivers, arangosh or the REST API). Foxx services are still able to access the `_users` collection for now, but this might change in future minor releases.
-
-* The internal format of the documents in the `_users` collection has changed from previous versions.
-
-* The `_queues` collection only allows read-only access from outside of the arangod server process.
-
-* Accessing `_queues` is only supported through the official `@arangodb/queues` module for Foxx apps.
diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges33.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges33.md
deleted file mode 100644
index 759dd2d5abde..000000000000
--- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges33.md
+++ /dev/null
@@ -1,63 +0,0 @@
-Incompatible changes in ArangoDB 3.3
-====================================
-
-It is recommended to check the following list of incompatible changes **before**
-upgrading to ArangoDB 3.3, and adjust any client programs if necessary.
-
-The following incompatible changes have been made in ArangoDB 3.3:
-
-* AQL: during a traversal, if a vertex is not found, arangod will not log an error and
-  continue with a NULL value, but will instead register a warning in the query and
-  continue with a NULL value.
-
-  If a non-existing vertex is referenced from a traversal, it is not desirable to log
-  errors as ArangoDB can store edges pointing to non-existing vertices (which is perfectly
-  valid if the low-level insert APIs are used). As linking to non-existing vertices
-  may indicate an issue in/with the data model or the client application, the warning is
-  registered in the query so client applications have access to it.
-
-* ArangoDB usernames must not start with the string `:role:`.
-
-* The startup configuration parameter `--cluster.my-id` does not have any effect in 3.3.
-  For compatibility reasons, ArangoDB 3.3 will not fail on startup if the option is
-  still used in the configuration, but it will silently ignore this option.
-
-* The startup configuration parameter `--cluster.my-local-info` is deprecated now.
-  Using it will make arangod log a warning on startup.
-
-* Server startup: the recommended value for the Linux kernel setting in
-  `/proc/sys/vm/max_map_count` was increased to a value eight times as high as in
-  3.2. arangod checks at startup whether the effective value of this setting is
-  presumably too low, and it will issue a warning in this case, recommending to
-  increase the value.
-
-  This is now more likely to happen than in previous versions, as the recommended
-  value is now eight times higher than in 3.2. The startup warnings will look like
-  this (with actual numbers varying):
-
-      WARNING {memory} maximum number of memory mappings per process is 65530, which seems too low.
it is recommended to set it to at least 512000 - - Please refer to [the Linux kernel documentation](https://www.kernel.org/doc/Documentation/sysctl/vm.txt) - for more information on this setting. This change only affects the Linux version of ArangoDB. - - -Client tools ------------- - -* The option `--recycle-ids` has been removed from the arangorestore command. - Using this option could have led to problems on the restore, with potential - id conflicts between the originating server (the source dump server) and the - target server (the restore server). - -* The option `--compat` has been removed from the arangodump command - and the `/_api/replication/dump` REST API endpoint. - In order to create a dump from an ArangoDB 2.8 instance, please use an older - version of the client tools. Older ArangoDB versions are no longer be supported by - the arangodump and arangorestore binaries shipped with 3.3. - -Miscellaneous -------------- - -The minimum supported compiler for compiling ArangoDB from source is now g++ 5.4 -(bumped up from g++ 4.9). This change only affects users that compile ArangoDB on -their own. diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges34.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges34.md deleted file mode 100644 index 8e07228b53a4..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges34.md +++ /dev/null @@ -1,850 +0,0 @@ -Incompatible changes in ArangoDB 3.4 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 3.4, and adjust any client programs if necessary. - -The following incompatible changes have been made in ArangoDB 3.4: - - -Release packages ----------------- - -The official ArangoDB release packages for Linux are now built as static executables -linked with the [musl libc](https://www.musl-libc.org/) standard library. For Linux, -there are release packages for the Debian-based family of Linux distributions (.deb), -and packages for RedHat-based distributions (.rpm). There are no specialized binaries -for the individual Linux distributions nor for their individual subversions. - -The release packages are intended to be reasonably portable (see minimum supported -architectures below) and should run on a variety of different Linux distributions and -versions. - -Release packages are provided for Windows and macOS as well. - - -Supported architectures ------------------------ - -The minimum supported architecture for the official release packages of ArangoDB is -now the Nehalem architecture. - -All release packages are built with compiler optimizations that require at least -this architecture. The following CPU features are required for running an official -release package (note: these are all included in the Nehalem architecture and upwards): - -* SSE2 -* SSE3 -* SSE4.1 -* SSE4.2 - -In case the target platform does not conform to these requirements, ArangoDB may -not work correctly. - -The compiled-in architecture optimizations can be retrieved on most platforms by -invoking the *arangod* binary with the `--version` option. The optimization switches -will then show up in the output in the line starting with `optimization-flags`, e.g. - -``` -$ arangod --version -... 
-
-optimization-flags: -march=nehalem -msse2 -msse3 -mssse3 -msse4.1 -msse4.2 -mno-sse4a -mno-avx -mno-fma -mno-bmi2 -mno-avx2 -mno-xop -mno-fma4 -mno-avx512f -mno-avx512vl -mno-avx512pf -mno-avx512er -mno-avx512cd -mno-avx512dq -mno-avx512bw -mno-avx512ifma -mno-avx512vbmi
-platform: linux
-```
-
-Note that to get even more target-specific optimizations, it is possible for end
-users to compile ArangoDB on their own with compiler optimizations tailored to the
-target environment.
-
-
-Target host requirements
-------------------------
-
-When the ArangoDB service is started on a Linux host, it will switch to user
-`arangodb` and group `arangodb` at some point during the startup process.
-This user and group are created during ArangoDB package installation as usual.
-
-However, if either the group `arangodb` or the user `arangodb` cannot be found in
-the target host's local `/etc/group` or `/etc/passwd` storage (for example,
-because system users and groups are stored centrally using NIS, LDAP etc.), then
-the underlying group-lookup implementation used by ArangoDB will always consult
-the local nscd (name-service cache daemon) for this. Effectively this requires
-a running nscd instance on hosts that ArangoDB is installed on and that do store
-the operating system users in a place other than the host-local `/etc/group` and
-`/etc/passwd`.
-
-
-Storage engine
---------------
-
-In ArangoDB 3.4, the default storage engine for new installations is the RocksDB
-engine. This differs from previous versions (3.2 and 3.3), in which the default
-storage engine was the MMFiles engine.
-
-The MMFiles engine can still be explicitly selected as the storage engine for
-all new installations. It's only that the "auto" setting for selecting the storage
-engine will now use the RocksDB engine instead of the MMFiles engine.
-
-In the following scenarios, the effectively selected storage engine for new
-installations will be RocksDB:
-
-* `--server.storage-engine rocksdb`
-* `--server.storage-engine auto`
-* `--server.storage-engine` option not specified
-
-The MMFiles storage engine will be selected for new installations only when
-explicitly selected:
-
-* `--server.storage-engine mmfiles`
-
-To make users aware that the RocksDB storage engine was chosen automatically because
-no storage engine was explicitly selected, 3.4 will come up with the following
-startup warning:
-
-    using default storage engine 'rocksdb', as no storage engine was explicitly selected via the `--server.storage-engine` option.
-    please note that default storage engine has changed from 'mmfiles' to 'rocksdb' in ArangoDB 3.4
-
-On upgrade, any existing ArangoDB installation will keep its previously selected
-storage engine. The change of the default storage engine in 3.4 is thus only relevant
-for new ArangoDB installations and/or existing cluster setups for which new server
-nodes get added later. All server nodes in a cluster setup should use the same
-storage engine to work reliably. Using different storage engines in a cluster is
-unsupported.
-
-To validate that the different nodes in a cluster deployment use the same storage
-engine throughout the entire cluster, there is now a startup check performed by
-each coordinator. Each coordinator will contact all DB servers and check whether the
-storage engine on each DB server is the same as its own local storage engine. In case
-there is any discrepancy, the coordinator will abort its startup.
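-
-To see which engine an instance is effectively running, e.g. after an upgrade or when
-adding new nodes, the engine can be queried from arangosh (a hedged sketch; the exact
-set of attributes returned may vary):
-
-```js
-db._engine();
-// e.g. { "name" : "rocksdb", ... }
-```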
- - -Geo indexes ----------- - -- The on-disk storage format for indexes of type `geo` has changed for the RocksDB - storage engine. This also affects `geo1` and `geo2` indexes. - - This **requires** users to start the arangod process with the - `--database.auto-upgrade true` option to allow ArangoDB to recreate these - indexes using the new on-disk format. - - The on-disk format for geo indexes is incompatible with the on-disk format used - in 3.3 and 3.2, so an in-place downgrade from 3.4 to 3.3 is not supported. - -- Geo indexes will no longer be reported as _geo1_ or _geo2_ but as type `geo`. - The two previously known geo index types (`geo1` and `geo2`) are **deprecated**. - APIs for creating indexes (`ArangoCollection.ensureIndex`) will continue to support - `geo1` and `geo2`. - - -RocksDB engine data storage format ----------------------------------- - -Installations that start using ArangoDB 3.4 will use an optimized on-disk format -for storing documents using the RocksDB storage engine. The RocksDB engine will also use -a new table format version that was added in a recent version of the RocksDB library -and that is not available in ArangoDB versions before 3.4. - -This format cannot be used with ArangoDB 3.3 or before, meaning it is not possible to -perform an in-place downgrade from a fresh 3.4 install to 3.3 or earlier when using the -RocksDB engine. For more information on how to downgrade, please refer to the -[Downgrading](../Downgrading/README.md) chapter. - -Installations that were originally set up with older versions of ArangoDB (e.g. 3.2 -or 3.3) will continue to use the existing on-disk format for the RocksDB engine -even with ArangoDB 3.4 (unless you install a fresh 3.4 package and restore a backup -of your data on this fresh installation). - -In order to use the new binary format with existing data, it is required to -create a logical dump of the database data, shut down the server, erase the -database directory and restore the data from the logical dump. To minimize -downtime you can alternatively run a second arangod instance in your system -that replicates the original data; once the replication has reached completion, -you can switch the instances. - - -RocksDB intermediate commits ------------------------------ - -Intermediate commits in the RocksDB engine are now only enabled in standalone AQL queries -(not within a JS transaction), standalone truncate as well as for the "import" API. - -The options `intermediateCommitCount` and `intermediateCommitSize` will have no effect -anymore on transactions started via `/_api/transaction`, or `db._executeTransaction()`. - - -RocksDB background sync thread ------------------------------- - -The RocksDB storage engine in 3.4 has a background WAL syncing thread that by default -syncs RocksDB's WAL to disk every 100 milliseconds. This may cause additional background -I/Os compared to ArangoDB 3.3, but will distribute the sync calls more evenly over time -than the all-or-nothing file syncs that were performed by previous versions of ArangoDB. - -The syncing interval can be configured by adjusting the configuration option -`--rocksdb.sync-interval`. - -Note: this option is not supported on Windows platforms. Setting the sync interval -to a value greater than 0 will produce a startup warning on Windows.
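
To make the intermediate commit change described above more tangible, here is a small arangosh sketch (the collection name and numbers are made up for illustration): the options can still be passed to a JavaScript transaction, but they are ignored there in 3.4, since intermediate commits only apply to standalone AQL queries, standalone truncate and the import API.

```js
// Hypothetical example collection.
var col = db._create("bulkDemo");

db._executeTransaction({
  collections: { write: [ "bulkDemo" ] },
  action: function () {
    // the action runs server-side, so the db object has to be required there
    var db = require("@arangodb").db;
    for (var i = 0; i < 100000; i++) {
      db.bulkDemo.insert({ value: i });
    }
  },
  // accepted, but without effect for JS transactions in 3.4:
  // the whole transaction is still committed (or rolled back) as one unit
  intermediateCommitCount: 10000
});
```

By contrast, the same options keep their effect for standalone AQL queries that are run outside of a JavaScript transaction.
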
- - -RocksDB write buffer size ------------------------- - -The total amount of data to build up in all in-memory write buffers (backed by log -files) is now by default restricted to a certain fraction of the available physical -RAM. This helps restrict memory usage for the arangod process, but may have an -effect on the RocksDB storage engine's write performance. - -In ArangoDB 3.3 the governing configuration option `--rocksdb.total-write-buffer-size` -had a default value of `0`, which meant that the memory usage was not limited. ArangoDB -3.4 now changes the default value to about 40% of available physical RAM, and 512MiB -for setups with less than 4GiB of RAM. - - -Threading and request handling ------------------------------ - -The processing of incoming requests and the execution of requests by server threads -has changed in 3.4. - -Previous ArangoDB versions had a hard-coded implicit lower bound of 64 running -threads, up to which they would increase the number of running server threads. -That value could be increased further by adjusting the option `--server.maximal-threads`. -The configuration option `--server.threads` existed, but did not effectively set -or limit the number of running threads. - -In ArangoDB 3.4, the number of threads ArangoDB uses for request handling can now -be strictly bounded by configuration options. - -The number of server threads is now configured by the following startup options: - -- `--server.minimal-threads`: determines the minimum number of request processing - threads the server will start and always keep around -- `--server.maximal-threads`: determines the maximum number of request processing - threads the server will start for request handling. If that number of threads is - already running, arangod will not start further threads for request handling - -The actual number of request processing threads is adjusted dynamically at runtime -and will float between `--server.minimal-threads` and `--server.maximal-threads`. - - -HTTP REST API ------------- - -The following incompatible changes were made in the context of ArangoDB's HTTP REST -APIs: - -- The following, partly undocumented internal REST APIs have been removed in ArangoDB 3.4: - - - `GET /_admin/test` - - `GET /_admin/clusterCheckPort` - - `GET /_admin/cluster-test` - - `GET /_admin/routing/routes` - - `GET /_admin/statistics/short` - - `GET /_admin/statistics/long` - - `GET /_admin/auth/reload` - -- `GET /_api/index` will now return type `geo` for geo indexes, not type `geo1` - or `geo2` as previous versions did. - - For geo indexes, the index API will not return the attributes `constraint` and - `ignoreNull` anymore. These attributes were initially deprecated in ArangoDB 2.5. - -- `GET /_api/aqlfunction` was migrated to match the general structure of - ArangoDB replies. It now returns an object with a "result" attribute that - contains the list of available AQL user functions: - - ```json - { - "code": 200, - "error": false, - "result": [ - { - "name": "UnitTests::mytest1", - "code": "function () { return 1; }", - "isDeterministic": false - } - ] - } - ``` - - In previous versions, this REST API returned only the list of available - AQL user functions on the top level of the response. - Each AQL user function description now also contains the 'isDeterministic' attribute. - -- if authentication is turned on, requests to databases by users with insufficient - access rights will be answered with HTTP 401 (Unauthorized) instead of HTTP 404 (Not found).
- -- the REST handler for user permissions at `/_api/user` will now return HTTP 404 - (Not found) when trying to grant or revoke user permissions for a non-existing - collection. - - This affects the HTTP PUT calls to the endpoint `/_api/user///` - for collections that do not exist. - -The following APIs have been added or augmented: - -- additional `stream` attribute in queries HTTP API - - The REST APIs for retrieving the list of currently running and slow queries - at `GET /_api/query/current` and `GET /_api/query/slow` are now returning an - additional attribute `stream` for each query. - - This attribute indicates whether the query was started using a streaming cursor. - -- `POST /_api/document/{collection}` now supports repsert (replace-insert). - - This can be achieved by using the URL parameter `overwrite=true`. When set to - `true`, insertion will not fail in case of a primary key conflict, but turn - into a replace operation. - - When an insert turns into a replace, the previous version of the document can - be retrieved by passing the URL parameter `returnOld=true` - -- `POST /_api/aqlfunction` now includes an "isNewlyCreated" attribute that indicates - if a new function was created or if an existing one was replaced (in addition to the - "code" attribute, which remains 200 for replacement and 201 for creation): - - ```json - { - "code": "201", - "error": false, - "isNewlyCreated": true - } - ``` - -- `DELETE /_api/aqlfunction` now returns the number of deleted functions: - - ```json - { - "code": 200, - "error": false, - "deletedCount": 10 - } - ``` - -- `GET /_admin/status` now returns the attribute `operationMode` in addition to - `mode`. The attribute `writeOpsEnabled` is now also represented by the new - attribute `readOnly`, which is has an inverted value compared to the original - attribute. The old attributes are deprecated in favor of the new ones. - -- `POST /_api/collection` now will process the optional `shardingStrategy` - attribute in the response body in cluster mode. - - This attribute specifies the name of the sharding strategy to use for the - collection. Since ArangoDB 3.4 there are different sharding strategies to - select from when creating a new collection. The selected *shardingStrategy* - value will remain fixed for the collection and cannot be changed afterwards. - This is important to make the collection keep its sharding settings and - always find documents already distributed to shards using the same initial - sharding algorithm. - - The available sharding strategies are: - - `community-compat`: default sharding used by ArangoDB - Community Edition before version 3.4 - - `enterprise-compat`: default sharding used by ArangoDB - Enterprise Edition before version 3.4 - - `enterprise-smart-edge-compat`: default sharding used by smart edge - collections in ArangoDB Enterprise Edition before version 3.4 - - `hash`: default sharding used for new collections starting from version 3.4 - (excluding smart edge collections) - - `enterprise-hash-smart-edge`: default sharding used for new - smart edge collections starting from version 3.4 - - If no sharding strategy is specified, the default will be `hash` for - all collections, and `enterprise-hash-smart-edge` for all smart edge - collections (requires the *Enterprise Edition* of ArangoDB). - Manually overriding the sharding strategy does not yet provide a - benefit, but it may later in case other sharding strategies are added. 
- - In single-server mode, the *shardingStrategy* attribute is meaningless and - will be ignored. - -- a new API for inspecting the contents of the AQL query results cache has been added - to endpoint `GET /_api/query/cache/entries` - - This API returns the current contents of the AQL query results cache of the - currently selected database. - -- APIs for view management have been added at endpoint `/_api/view`. - -- The REST APIs for modifying graphs at endpoint `/_api/gharial` now support returning - the old revision of vertices / edges after modifying them. The APIs also supports - returning the just-inserted vertex / edge. This is in line with the already existing - single-document functionality provided at endpoint `/_api/document`. - - The old/new revisions can be accessed by passing the URL parameters `returnOld` and - `returnNew` to the following endpoints: - - * `/_api/gharial//vertex/` - * `/_api/gharial//edge/` - - The exception from this is that the HTTP DELETE verb for these APIs does not - support `returnOld` because that would make the existing API incompatible. - - -AQL ---- - -- the AQL functions `CALL` and `APPLY` may now throw the errors 1540 -(`ERROR_QUERY_FUNCTION_NAME_UNKNOWN`) and 1541 (`ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH`) -instead of error 1582 (`ERROR_QUERY_FUNCTION_NOT_FOUND`) in some situations. - -- the existing "fulltext-index-optimizer" optimizer rule has been removed - because its duty is now handled by the new "replace-function-with-index" rule. - -- the behavior of the `fullCount` option for AQL queries has changed so that it - will only take into account `LIMIT` statements on the top level of the query. - - `LIMIT` statements in subqueries will not have any effect on the `fullCount` results - any more. - -- the AQL functions `NEAR`, `WITHIN`, `WITHIN_RECTANGLE` and `FULLTEXT` do not - support accessing collections dynamically anymore. - - The name of the underlying collection and the name of the index attribute to be - used have to specified using either collection name identifiers, string literals - or bind parameters, but must not be specified using query variables. - - For example, the following AQL queries are ok: - - FOR doc IN NEAR(myCollection, 2.5, 3) RETURN doc - FOR doc IN NEAR(@@collection, 2.5, 3) RETURN doc - FOR doc IN FULLTEXT("myCollection", "body", "foxx") RETURN doc - FOR doc IN FULLTEXT(@@collection, @attribute, "foxx") RETURN doc - - Contrary, the following queries will fail to execute with 3.4 because of dynamic - collection/attribute names used in them: - - FOR name IN ["col1", "col2"] FOR doc IN NEAR(name, 2.5, 3) RETURN doc - - FOR doc IN collection - FOR match IN FULLTEXT(PARSE_IDENTIFIER(doc).collection, PARSE_IDENTIFIER(doc).key, "foxx") RETURN doc - -- the AQL warning 1577 ("collection used in expression") will not occur anymore - - It was used in previous versions of ArangoDB when the name of a collection was - used in an expression in an AQL query, e.g. - - RETURN c1 + c2 - - Due to internal changes in AQL this is not detected anymore in 3.4, so this - particular warning will not be raised. - - Additionally, using collections in arbitrary AQL expressions as above is unsupported - in a mixed cluster that is running a 3.3 coordinator and 3.4 DB server(s). The - DB server(s) running 3.4 will in this case not be able to use a collection in an - arbitrary expression, and instead throw an error. 
- -- the undocumented built-in visitor functions for AQL traversals have been removed, - as they were based on JavaScript implementations: - - - `HASATTRIBUTESVISITOR` - - `PROJECTINGVISITOR` - - `IDVISITOR` - - `KEYVISITOR` - - `COUNTINGVISITOR` - - Using any of these functions from inside AQL will now produce an error. - -- in previous versions, the AQL optimizer used two different ways of converting - strings into numbers. The two different ways have been unified into a single - way that behaves like the `TO_NUMBER` AQL function, which is also the documented - behavior. - - The change affects arithmetic operations with strings that contain numbers and - other trailing characters, e.g. - - expression 3.3 result 3.4 result TO_NUMBER() - 0 + "1a" 0 + 1 = 1 0 + 0 = 0 TO_NUMBER("1a") = 0 - 0 + "1 " 0 + 1 = 1 0 + 1 = 1 TO_NUMBER("1 ") = 1 - 0 + " 1" 0 + 1 = 1 0 + 1 = 1 TO_NUMBER(" 1") = 1 - 0 + "a1" 0 + 0 = 0 0 + 0 = 0 TO_NUMBER("a1") = 0 - -- the AQL function `DATE_NOW` is now marked as deterministic internally, meaning that - the optimizer may evaluate the function at query compile time and not at query - runtime. This will mean that calling the function repeatedly inside the same query will - now always produce the same result, whereas in previous versions of ArangoDB the - function may have generated different results. - - Each AQL query that is run will still evaluate the result value of the `DATE_NOW` - function independently, but only once at the beginning of the query. This is most - often what is desired anyway, but the change makes `DATE_NOW` useless to measure - time differences inside a single query. - -- the internal AQL function `PASSTHRU` (which simply returns its call argument) - has been changed from being non-deterministic to being deterministic, provided its - call argument is also deterministic. This change should not affect end users, as - `PASSTHRU` is intended to be used for internal testing only. Should end users use - this AQL function in any query and need a wrapper to make query parts non-deterministic, - the `NOOPT` AQL function can stand in as a non-deterministic variant of `PASSTHRU` - -- the AQL query optimizer will by default now create at most 128 different execution - plans per AQL query. In previous versions the maximum number of plans was 192. - - Normally the AQL query optimizer will generate a single execution plan per AQL query, - but there are some cases in which it creates multiple competing plans. More plans - can lead to better optimized queries, however, plan creation has its costs. The - more plans are created and shipped through the optimization pipeline, the more - time will be spent in the optimizer. - To make the optimizer better cope with some edge cases, the maximum number of plans - to create is now strictly enforced and was lowered compared to previous versions of - ArangoDB. - - Note that this default maximum value can be adjusted globally by setting the startup - option `--query.optimizer-max-plans` or on a per-query basis by setting a query's - `maxNumberOfPlans` option. - -- When creating query execution plans for a query, the query optimizer was fetching - the number of documents of the underlying collections in case multiple query - execution plans were generated. The optimizer used these counts as part of its - internal decisions and execution plan costs calculations. 
- - Fetching the number of documents of a collection can have measurable overhead in a - cluster, so ArangoDB 3.4 now caches the "number of documents" that are referred to - when creating query execution plans. This may save a few roundtrips in case the - same collections are frequently accessed using AQL queries. - - The "number of documents" value was not and is not supposed to be 100% accurate - in this stage, as it is used for rough cost estimates only. It is possible however - that when explaining an execution plan, the "number of documents" estimated for - a collection is using a cached stale value, and that the estimates change slightly - over time even if the underlying collection is not modified. - -- AQL query results that are served from the AQL query results cache can now return - the *fullCount* attribute as part of the query statistics. Alongside the *fullCount* - attribute, other query statistics will be returned. However, these statistics will - reflect figures generated during the initial query execution, so especially a - query's *executionTime* figure may be misleading for a cached query result. - - -Usage of V8 ------------ - -The internal usage of the V8 JavaScript engine for non-user actions has been -reduced in ArangoDB 3.4. Several APIs have been rewritten to not depend on V8 -and thus do not require using the V8 engine nor a V8 context for execution -anymore. - -Compared to ArangoDB 3.3, the following parts of ArangoDB can now be used -without the V8 engine: - -- agency nodes in a cluster -- database server nodes in a cluster -- cluster plan application on database server nodes -- all of AQL (with the exception of user-defined functions) -- the graph modification APIs at endpoint `/_api/gharial` -- background statistics gathering - -Reduced usage of V8 in ArangoDB may allow end users to lower the configured -numbers of V8 contexts to start. In terms of configuration options, these -are: - -- `--javascript.v8-contexts`: the maximum number of V8 contexts to create - (high-water mark) -- `--javascript.v8-contexts-minimum`: the minimum number of V8 contexts to - create at server start and to keep around permanently (low-water mark) - -The default values for these startup options have not been changed in ArangoDB -3.4, but depending on the actual workload, 3.4 ArangoDB instances may need -less V8 contexts than 3.3. - -As mentioned above, agency and database server nodes in a cluster does not -require V8 for any operation in 3.4, so the V8 engine is turned off entirely on -such nodes, regardless of the number of configured V8 contexts there. - -The V8 engine is still enabled on coordinator servers in a cluster and on single -server instances. Here the numbe of started V8 contexts may actually be reduced -in case a lot of the above features are used. - - -Startup option changes ----------------------- - -For arangod, the following startup options have changed: - -- the number of server threads is now configured by the following startup options: - - - `--server.minimal-threads`: determines the minimum number of request processing - threads the server will start - - `--server.maximal-threads`: determines the maximum number of request processing - threads the server will start - - The actual number of request processing threads is adjusted dynamically at runtime - and will float between `--server.minimal-threads` and `--server.maximal-threads`. 
- -- the default value for the existing startup option `--javascript.gc-interval` - has been increased from every 1000 to every 2000 requests, and the default value - for the option `--javascript.gc-frequency` has been increased from 30 to 60 seconds. - - This will make the V8 garbage collection run less often by default than in previous - versions, reducing CPU load a bit and leaving more V8 contexts available on average. - -- the startup option `--cluster.my-local-info` has been removed in favor of persisted - server UUIDs. - - The option `--cluster.my-local-info` was deprecated since ArangoDB 3.3. - -- the startup option `--database.check-30-revisions` was removed. It was used for - checking the revision ids of documents for having been created with ArangoDB 3.0, - which required a dump & restore migration of the data to 3.1. - - As direct upgrades from ArangoDB 3.0 to 3.4 or from 3.1 to 3.4 are not supported, - this option has been removed in 3.4. - -- the startup option `--server.session-timeout` has been obsoleted. Setting this - option will not have any effect. - -- the option `--replication.automatic-failover` was renamed to `--replication.active-failover` - - Using the old option name will still work in ArangoDB 3.4, but support for the old - option name will be removed in future versions of ArangoDB. - -- the option `--rocksdb.block-align-data-blocks` has been added - - If set to true, data blocks stored by the RocksDB engine are aligned on lesser of page - size and block size, which may waste some memory but may reduce the number of cross-page - I/Os operations. - - The default value for this option is *false*. - - As mentioned above, ArangoDB 3.4 changes the default value of the configuration option - `--rocksdb.total-write-buffer-size` to about 40% of available physical RAM, and 512MiB - for setups with less than 4GiB of RAM. In ArangoDB 3.3 this option had a default value - of `0`, which meant that the memory usage for write buffers was not limited. - - -Permissions ------------ - -The behavior of permissions for databases and collections changed: - -The new fallback rule for databases for which no access level is explicitly -specified is now: - -* Choose the higher access level of: - * A wildcard database grant - * A database grant on the `_system` database - -The new fallback rule for collections for which no access level is explicitly -specified is now: - -* Choose the higher access level of: - * Any wildcard access grant in the same database, or on "*/*" - * The access level for the current database - * The access level for the `_system` database - - -SSLv2 ------ - -Support for SSLv2 has been removed from arangod and all client tools. - -Startup will now be aborted when using SSLv2 for a server endpoint, or when connecting -with one of the client tools via an SSLv2 connection. - -SSLv2 has been disabled in the OpenSSL library by default in recent versions -because of security vulnerabilities inherent in this protocol. - -As it is not safe at all to use this protocol, the support for it has also -been stopped in ArangoDB. End users that use SSLv2 for connecting to ArangoDB -should change the protocol from SSLv2 to TLSv12 if possible, by adjusting -the value of the `--ssl.protocol` startup option. - - -Replication ------------ - -By default, database-specific and global replication appliers use a slightly -different configuration in 3.4 than in 3.3. 
In 3.4 the default value for the -configuration option `requireFromPresent` is now `true`, meaning the follower -will abort the replication when it detects gaps in the leader's stream of -events. Such gaps can happen if the leader has pruned WAL log files with -events that have not been fetched by a follower yet, which may happen for -example if the network connectivity between follower and leader is bad. - -Previous versions of ArangoDB (up to 3.3) used a default value of `false` for -`requireFromPresent`, meaning that any such gaps in the replication data -exchange would not cause the replication to stop. 3.4 now stops replication by -default and writes corresponding errors to the log. Replication can automatically -be restarted in this case by setting the `autoResync` replication configuration -option to `true`. - - -Mixed-engine clusters ---------------------- - -Starting a cluster with coordinators and DB servers using different storage -engines is not supported. Doing it anyway will now log an error and abort a -coordinator's startup. - -Previous versions of ArangoDB did not detect the usage of different storage -engines in a cluster, but the runtime behavior of the cluster was undefined. - - -Client tools ------------- - -The client tool _arangoimp_ has been renamed to _arangoimport_ for consistency. - -Release packages will still install _arangoimp_ as a symlink to _arangoimport_, -so user scripts invoking _arangoimp_ do not need to be changed to work with -ArangoDB 3.4. However, user scripts invoking _arangoimp_ should eventually be -changed to use _arangoimport_ instead, as that will be the long-term supported -way of running imports. - -The tools _arangodump_ and _arangorestore_ will now by default work with two -threads when extracting data from a server or loading data back into a server, respectively. -The number of threads to use can be configured for both tools via the -`--threads` parameter when invoking them. This change is noteworthy because in -previous versions of ArangoDB both tools were single-threaded and only processed -one collection at a time, while starting with ArangoDB 3.4 by default they will -process two collections at a time, with the intended benefit of completing their -work faster. However, this may create higher load on servers than in previous -versions of ArangoDB. If the load produced by _arangodump_ or _arangorestore_ is -higher than desired, please consider setting their `--threads` parameter to a -value of `1` when invoking them. - -In the ArangoShell, the undocumented JavaScript module `@arangodb/actions` has -been removed. This module contained the methods `printRouting` and `printFlatRouting`, -which were used for debugging purposes only. - -In the ArangoShell, the undocumented JavaScript functions `reloadAuth` and `routingCache` -have been removed from the `internal` module. - - -Foxx applications ------------------ - -The undocumented JavaScript module `@arangodb/database-version` has been -removed, so it cannot be used from Foxx applications anymore. The module only -provided the current version of the database, so any client-side invocations -can easily be replaced by using `db._version()` instead. - -The `ShapedJson` JavaScript object prototype, a remnant from ArangoDB 2.8 -for encapsulating database documents, has been removed in ArangoDB 3.4. - - -Miscellaneous changes ---------------------- - -For the MMFiles engine, the compactor thread(s) were renamed from "Compactor" -to "MMFilesCompactor".
- -This change will be visible only on systems which allow assigning names to -threads. - - -Deprecated features -=================== - -The following features and APIs are deprecated in ArangoDB 3.4, and will be -removed in future versions of ArangoDB: - -* the JavaScript-based traversal REST API at `/_api/traversal` and the - underlaying traversal module `@arangodb/graph/traversal`: - - This API has several limitations (including low result set sizes) and has - effectively been unmaintained since the introduction of native AQL traversal. - - It is recommended to migrate client applications that use the REST API at - `/_api/traversal` to use AQL-based traversal queries instead. - -* the REST API for simple queries at `/_api/simple`: - - The simple queries provided by the `/_api/simple` endpoint are limited in - functionality and will internally resort to AQL queries anyway. It is advised - that client applications also use the equivalent AQL queries instead of - using the simple query API, because that is more flexible and allows greater - control of how the queries are executed. - -* the REST API for querying endpoints at `/_api/endpoint`: - - The API `/_api/endpoint` is deprecated since ArangoDB version 3.1. - For cluster mode there is `/_api/cluster/endpoints` to find all current - coordinator endpoints. - -* accessing collections via their numeric IDs instead of their names. This mostly - affects the REST APIs at - - - `/_api/collection/` - - `/_api/document/` - - `/_api/simple` - - Note that in ArangoDB 3.4 it is still possible to access collections via - their numeric ID, but the preferred way to access a collections is by its - user-defined name. - -* the REST API for WAL tailing at `/_api/replication/logger-follow`: - - The `logger-follow` WAL tailing API has several limitations. A better API - was introduced at endpoint `/_api/wal/tail` in ArangoDB 3.3. - - Client applications using the old tailing API at `/_api/replication/logger-follow` - should switch to the new API eventually. - -* the result attributes `mode` and `writeOpsEnabled` in the REST API for querying - a server's status at `/_admin/status`: - - `GET /_admin/status` returns the additional attributes `operationMode` and - `readOnly` now, which should be used in favor of the old attributes. - -* creating geo indexes via any APIs with one of the types `geo1` or `geo2`: - - The two previously known geo index types (`geo1`and `geo2`) are deprecated now. - Instead, when creating geo indexes, the type `geo` should be used. - - The types `geo1` and `geo2` will still work in ArangoDB 3.4, but may be removed - in future versions. - -* the persistent index type is marked for removal in 4.0.0 and is thus deprecated. - - This index type was added when there was only the MMFiles storage engine as - kind of a stop gap. We recommend to switch to RocksDB engine, which persists - all index types with no difference between skiplist and persistent indexes. - -* the legacy mode for Foxx applications from ArangoDB 2.8 or earlier: - - The legacy mode is described in more detail in the [Foxx manual](https://docs.arangodb.com/3.3/Manual/Foxx/LegacyMode.html). - To upgrade an existing Foxx application that still uses the legacy mode, please - follow the steps described in [the manual](https://docs.arangodb.com/3.3/Manual/Foxx/Migrating2x/). 
- -* the AQL geo functions `NEAR`, `WITHIN`, `WITHIN_RECTANGLE` and `IS_IN_POLYGON`: - - The special purpose `NEAR` AQL function can be substituted with the - following AQL (provided there is a geo index present on the `doc.latitude` - and `doc.longitude` attributes) since ArangoDB 3.2: - - FOR doc in geoSort - SORT DISTANCE(doc.latitude, doc.longitude, 0, 0) - LIMIT 5 - RETURN doc - - `WITHIN` can be substituted with the following AQL since ArangoDB 3.2: - - FOR doc in geoFilter - FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000 - RETURN doc - - Compared to using the special purpose AQL functions this approach has the - advantage that it is more composable, and will also honor any `LIMIT` values - used in the AQL query. - - In ArangoDB 3.4, `NEAR`, `WITHIN`, `WITHIN_RECTANGLE` and `IS_IN_POLYGON` - will still work and automatically be rewritten by the AQL query optimizer - to the above forms. However, AQL queries using the deprecated AQL functions - should eventually be adjusted. - -* using the `arangoimp` binary instead of `arangoimport` - - `arangoimp` has been renamed to `arangoimport` for consistency in ArangoDB - 3.4, and `arangoimp` is just a symbolic link to `arangoimport` now. - `arangoimp` is there for compatibility only, but client scripts should - eventually be migrated to use `arangoimport` instead. - -* the `foxx-manager` executable is deprecated and will be removed in ArangoDB 4. - - Please use foxx-cli instead: https://docs.arangodb.com/3.4/Manual/Foxx/Deployment/FoxxCLI/ diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges35.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges35.md deleted file mode 100644 index 138909e4e126..000000000000 --- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges35.md +++ /dev/null @@ -1,130 +0,0 @@ -Incompatible changes in ArangoDB 3.5 -==================================== - -It is recommended to check the following list of incompatible changes **before** -upgrading to ArangoDB 3.5, and adjust any client programs if necessary. - -The following incompatible changes have been made in ArangoDB 3.5: - -ID values in log messages -------------------------- - -By default, ArangoDB and its client tools now show a 5 digit unique ID value in -any of their log messages, e.g. - - 2019-03-25T21:23:19Z [8144] INFO [cf3f4] ArangoDB (version 3.5.0 enterprise [linux]) is ready for business. Have fun!. - -In this message, the `cf3f4` is the message's unique ID value. ArangoDB users can -use this ID to build custom monitoring or alerting based on specific log ID values. - -The presence of these ID values in log messages may confuse custom log message filtering -or routing mechanisms that parse log messages and that rely on the old log message -format. - -This can be fixed adjusting any existing log message parsers and making them aware -of the ID values. The ID values are always 5 byte strings, consisting of the characters -`[0-9a-f]`. ID values are placed directly behind the log level (e.g. `INFO`) for -general log messages that do not contain a log topic, and directly behind the log -topic for messages that contain a topic, e.g. - - 2019-03-25T21:23:19Z [8144] INFO [cf3f4] ArangoDB (version 3.5.0 enterprise [linux]) is ready for business. Have fun!. 
- 2019-03-25T21:23:16Z [8144] INFO {authentication} [3844e] Authentication is turned on (system only), authentication for unix sockets is turned on - -Alternatively, the log IDs can be suppressed in all log messages by setting the startup -option `--log.ids false` when starting arangod or any of the client tools. - - -Startup options ---------------- - -The hidden startup option `--rocksdb.delayed_write_rate` was renamed to the more -consistent `--rocksdb.delayed-write-rate`. When the old option name is used, the -arangod startup will be aborted with a descriptive error message. - -Web interface -------------- - -### Potentially different sort order of documents - -In the list of documents for a collection, the documents will now always be sorted -in lexicographical order of their `_key` values. An exception for keys representing -quasi-numerical values has been removed when doing the sorting in the web interface. - -Therefore a document with a key value "10" will now be displayed before a document -with a key value of "9". - -### Removal of index types "skiplist" and "persistent" (RocksDB engine) - -For the RocksDB engine, the selection of index types "persistent" and "skiplist" -has been removed from the web interface when creating new indexes. - -The index types "hash", "skiplist" and "persistent" are just aliases of each other -when using the RocksDB engine, so there is no need to offer all of them in parallel. - - -AQL ---- - -3.5 enforces the invalidation of variables in AQL queries after usage of an AQL -COLLECT statements as documented. The documentation for variable invalidation claims -that - - The COLLECT statement will eliminate all local variables in the current scope. - After COLLECT only the variables introduced by COLLECT itself are available. - -However, the described behavior was not enforced when a COLLECT was preceded by a -FOR loop that was itself preceded by a COLLECT. In the following query the final -RETURN statement accesses variable `key1` though the variable should have been -invalidated by the COLLECT directly before it: - - FOR x1 IN 1..2 - COLLECT key1 = x1 - FOR x2 IN 1..2 - COLLECT key2 = x2 - RETURN [key2, key1] - -In previous releases, this query was -parsed ok, but the contents of variable `key1` in the final RETURN statement were -undefined. - -This change is about making queries as the above fail with a parse error, as an -unknown variable `key1` is accessed here, avoiding the undefined behavior. This is -also in line with what the documentation states about variable invalidation. - -HTTP Replication APIs ---------------------- - -### New parameter for WAL tailing API - -Tailing of recent server operations via `/_api/wal/tail` gets a new parameter -`syncerId`, which helps in tracking the WAL tick of each client. If set, this -supersedes the parameter `serverId` for this purpose. The API stays backwards -compatible. - - -Miscellaneous -------------- - -### Index creation - -In previous versions of ArangoDB, if one attempted to create an index with a -specified `_id`, and that `_id` was already in use, the server would typically -return the existing index with matching `_id`. This is somewhat unintuitive, as -it would ignore if the rest of the definition did not match. This behavior has -been changed so that the server will now return a duplicate identifier error. - -### Version details output - -The attribute key `openssl-version` in the server/client tool version details -output was renamed to `openssl-version-compile-time`. 
- -This change affects the output produced when starting one of the ArangoDB -executables (e.g. arangod, arangosh) with the `--version` command. It also -changes the attribute name in the detailed response of the `/_api/version` REST API. - -### Overcommit settings - -On Linux, ArangoDB will now show a startup warning in case the kernel setting -`vm.overcommit_memory` is set to a value of 2 and the jemalloc memory allocator -is in use. This combination does not play well together, and may lead to the -kernel denying arangod's memory allocation requests in more cases than necessary. diff --git a/Documentation/Books/Manual/SUMMARY.md b/Documentation/Books/Manual/SUMMARY.md deleted file mode 100644 index f2a2ff103b32..000000000000 --- a/Documentation/Books/Manual/SUMMARY.md +++ /dev/null @@ -1,453 +0,0 @@ - -# Summary - -* [Introduction](README.md) -* [Highlights](Highlights.md) - -## GETTING FAMILIAR - -* [Getting Started](GettingStarted/README.md) - * [Installation](GettingStarted/Installation.md) - * [Authentication](GettingStarted/Authentication.md) - * [Web Interface](GettingStarted/WebInterface.md) - * [Databases, Collections and Documents](GettingStarted/DatabasesCollectionsDocuments.md) - * [Querying the Database](GettingStarted/QueryingTheDatabase.md) - * [Coming from SQL](GettingStarted/ComingFromSql.md) - * [Next Steps](GettingStarted/NextSteps.md) -* [Tutorials](Tutorials/README.md) - - * [ArangoDB Starter](Tutorials/Starter/README.md) - - * [Datacenter to datacenter Replication](Tutorials/DC2DC/README.md) - - * [Kubernetes](Tutorials/Kubernetes/README.md) - * [Amazon EKS](Tutorials/Kubernetes/EKS.md) - * [Google GKE](Tutorials/Kubernetes/GKE.md) - * [Azure AKS](Tutorials/Kubernetes/AKS.md) - * [DC2DC on Kubernetes](Tutorials/Kubernetes/DC2DC.md) -* [Programs & Tools](Programs/README.md) - * [ArangoDB Server](Programs/Arangod/README.md) - * [Options](Programs/Arangod/Options.md) - * [Global](Programs/Arangod/Global.md) - * [Agency](Programs/Arangod/Agency.md) - * [ArangoSearch](Programs/Arangod/Arangosearch.md) - * [Audit](Programs/Arangod/Audit.md) - * [Cache](Programs/Arangod/Cache.md) - * [Cluster](Programs/Arangod/Cluster.md) - * [Compaction](Programs/Arangod/Compaction.md) - * [Database](Programs/Arangod/Database.md) - * [Foxx](Programs/Arangod/Foxx.md) - * [Frontend](Programs/Arangod/Frontend.md) - * [HTTP](Programs/Arangod/Http.md) - * [JavaScript](Programs/Arangod/Javascript.md) - * [LDAP](Programs/Arangod/Ldap.md) - * [Log](Programs/Arangod/Log.md) - * [Nonce](Programs/Arangod/Nonce.md) - * [Query](Programs/Arangod/Query.md) - * [Random](Programs/Arangod/Random.md) - * [Replication](Programs/Arangod/Replication.md) - * [RocksDB](Programs/Arangod/Rocksdb.md) - * [Server](Programs/Arangod/Server.md) - * [SSL](Programs/Arangod/Ssl.md) - * [TCP](Programs/Arangod/Tcp.md) - * [Temp](Programs/Arangod/Temp.md) - * [TTL](Programs/Arangod/Ttl.md) - * [VST](Programs/Arangod/Vst.md) - * [WAL](Programs/Arangod/Wal.md) - * [Web Interface](Programs/WebInterface/README.md) - * [Dashboard](Programs/WebInterface/Dashboard.md) - * [Cluster](Programs/WebInterface/Cluster.md) - * [Collections](Programs/WebInterface/Collections.md) - * [Document](Programs/WebInterface/Document.md) - * [Queries](Programs/WebInterface/AqlEditor.md) - * [Graphs](Programs/WebInterface/Graphs.md) - * [Services](Programs/WebInterface/Services.md) - * [Users](Programs/WebInterface/Users.md) - * [Logs](Programs/WebInterface/Logs.md) - * [ArangoDB Shell](Programs/Arangosh/README.md) - * 
[Examples](Programs/Arangosh/Examples.md) - * [Details](Programs/Arangosh/Details.md) - * [Options](Programs/Arangosh/Options.md) - - * [ArangoDB Starter](Programs/Starter/README.md) - * [Options](Programs/Starter/Options.md) - * [Security](Programs/Starter/Security.md) - * [Architecture](Programs/Starter/Architecture.md) - * [Arangodump](Programs/Arangodump/README.md) - * [Examples](Programs/Arangodump/Examples.md) - * [Options](Programs/Arangodump/Options.md) - * [Maskings](Programs/Arangodump/Maskings.md) - * [Limitations](Programs/Arangodump/Limitations.md) - * [Arangorestore](Programs/Arangorestore/README.md) - * [Examples](Programs/Arangorestore/Examples.md) - * [Fast Cluster Restore](Programs/Arangorestore/FastClusterRestore.md) - * [Options](Programs/Arangorestore/Options.md) - * [Arangoimport](Programs/Arangoimport/README.md) - * [Examples JSON](Programs/Arangoimport/ExamplesJson.md) - * [Examples CSV](Programs/Arangoimport/ExamplesCsv.md) - * [Details](Programs/Arangoimport/Details.md) - * [Options](Programs/Arangoimport/Options.md) - * [Arangoexport](Programs/Arangoexport/README.md) - * [Examples](Programs/Arangoexport/Examples.md) - * [Options](Programs/Arangoexport/Options.md) - * [Arangobench](Programs/Arangobench/README.md) - * [Examples](Programs/Arangobench/Examples.md) - * [Options](Programs/Arangobench/Options.md) - * [Arangoinspect](Programs/Arangoinspect/README.md) - * [Examples](Programs/Arangoinspect/Examples.md) - * [Options](Programs/Arangoinspect/Options.md) - * [Datafile Debugger](Programs/Arango-dfdb/README.md) - * [Examples](Programs/Arango-dfdb/Examples.md) - - * [Foxx CLI](Programs/FoxxCLI/README.md) - * [Details](Programs/FoxxCLI/Details.md) - -## CORE TOPICS - -* [Data models & modeling](DataModeling/README.md) - * [Concepts](DataModeling/Concepts.md) - * [Databases](DataModeling/Databases/README.md) - * [Working with Databases](DataModeling/Databases/WorkingWith.md) - * [Notes about Databases](DataModeling/Databases/Notes.md) - * [Collections](DataModeling/Collections/README.md) - * [Collection Methods](DataModeling/Collections/CollectionMethods.md) - * [Database Methods](DataModeling/Collections/DatabaseMethods.md) - * [Documents](DataModeling/Documents/README.md) - * [Basics and Terminology](DataModeling/Documents/DocumentAddress.md) - * [Collection Methods](DataModeling/Documents/DocumentMethods.md) - * [Database Methods](DataModeling/Documents/DatabaseMethods.md) - * [Graphs, Vertices & Edges](DataModeling/GraphsVerticesEdges.md) - * [Views](DataModeling/Views/README.md) - * [Database Methods](DataModeling/Views/DatabaseMethods.md) - * [View Methods](DataModeling/Views/ViewMethods.md) - * [Naming Conventions](DataModeling/NamingConventions/README.md) - * [Database Names](DataModeling/NamingConventions/DatabaseNames.md) - * [Collection and View Names](DataModeling/NamingConventions/CollectionAndViewNames.md) - * [Document Keys](DataModeling/NamingConventions/DocumentKeys.md) - * [Attribute Names](DataModeling/NamingConventions/AttributeNames.md) - * [Operational Factors](DataModeling/OperationalFactors.md) -* [Indexing](Indexing/README.md) - * [Index Basics](Indexing/IndexBasics.md) - * [Which index to use when](Indexing/WhichIndex.md) - * [Index Utilization](Indexing/IndexUtilization.md) - * [Working with Indexes](Indexing/WorkingWithIndexes.md) - * [Hash Indexes](Indexing/Hash.md) - * [Skiplists](Indexing/Skiplist.md) - * [Persistent](Indexing/Persistent.md) - * [TTL Indexes](Indexing/Ttl.md) - * [Fulltext Indexes](Indexing/Fulltext.md) - * 
[Geo-spatial Indexes](Indexing/Geo.md) - * [Vertex Centric Indexes](Indexing/VertexCentric.md) -* [Transactions](Transactions/README.md) - * [Transaction invocation](Transactions/TransactionInvocation.md) - * [Passing parameters](Transactions/Passing.md) - * [Locking and isolation](Transactions/LockingAndIsolation.md) - * [Durability](Transactions/Durability.md) - * [Limitations](Transactions/Limitations.md) -* [Graphs](Graphs/README.md) - * [General Graphs](Graphs/GeneralGraphs/README.md) - * [Graph Management](Graphs/GeneralGraphs/Management.md) - * [Graph Functions](Graphs/GeneralGraphs/Functions.md) - * [SmartGraphs](Graphs/SmartGraphs/README.md) - * [SmartGraph Management](Graphs/SmartGraphs/Management.md) - * [Traversals](Graphs/Traversals/README.md) - * [Using Traversal Objects](Graphs/Traversals/UsingTraversalObjects.md) - * [Example Data](Graphs/Traversals/ExampleData.md) - * [Working with Edges](Graphs/Edges/README.md) - * [Pregel](Graphs/Pregel/README.md) -* [ArangoSearch Views](Views/ArangoSearch/README.md) - * [Getting Started](Views/ArangoSearch/GettingStarted.md) - * [Detailed Overview](Views/ArangoSearch/DetailedOverview.md) - * [Analyzers](Views/ArangoSearch/Analyzers.md) - * [Scorers](Views/ArangoSearch/Scorers.md) -* [Analyzers](Analyzers/README.md) - -## ADVANCED TOPICS - -* [Architecture](Architecture/README.md) - * [ArangoDB Deployment Modes](Architecture/DeploymentModes/README.md) - * [Single Instance](Architecture/DeploymentModes/SingleInstance/README.md) - * [Master/Slave](Architecture/DeploymentModes/MasterSlave/README.md) - * [Architecture](Architecture/DeploymentModes/MasterSlave/Architecture.md) - * [Limitations](Architecture/DeploymentModes/MasterSlave/Limitations.md) - * [Active Failover](Architecture/DeploymentModes/ActiveFailover/README.md) - * [Architecture](Architecture/DeploymentModes/ActiveFailover/Architecture.md) - * [Limitations](Architecture/DeploymentModes/ActiveFailover/Limitations.md) - * [Cluster](Architecture/DeploymentModes/Cluster/README.md) - * [Architecture](Architecture/DeploymentModes/Cluster/Architecture.md) - * [Data models](Architecture/DeploymentModes/Cluster/DataModels.md) - * [Limitations](Architecture/DeploymentModes/Cluster/Limitations.md) - - * [Datacenter to datacenter replication](Architecture/DeploymentModes/DC2DC/README.md) - * [Introduction](Architecture/DeploymentModes/DC2DC/Introduction.md) - * [Applicability](Architecture/DeploymentModes/DC2DC/Applicability.md) - * [Requirements](Architecture/DeploymentModes/DC2DC/Requirements.md) - * [Limitations](Architecture/DeploymentModes/DC2DC/Limitations.md) - * [Single Instance vs. 
Cluster](Architecture/SingleInstanceVsCluster.md) - * [Storage Engines](Architecture/StorageEngines.md) - * [Replication](Architecture/Replication/README.md) - * [Write-ahead log](Architecture/WriteAheadLog.md) -* [Foxx Microservices](Foxx/README.md) - * [Getting started](Foxx/GettingStarted.md) - * [Guides](Foxx/Guides/README.md) - * [Working with routers](Foxx/Guides/Routing.md) - * [Working with collections](Foxx/Guides/Collections.md) - * [Writing queries](Foxx/Guides/Queries.md) - * [Development mode](Foxx/Guides/DevelopmentMode.md) - * [Testing Foxx services](Foxx/Guides/Testing.md) - * [Foxx in a cluster](Foxx/Guides/Cluster.md) - * [Scripts and scheduling](Foxx/Guides/Scripts.md) - * [Using Node modules](Foxx/Guides/BundledNodeModules.md) - * [Using Webpack with Foxx](Foxx/Guides/Webpack.md) - * [Authentication and sessions](Foxx/Guides/Auth.md) - * [Linking services together](Foxx/Guides/Dependencies.md) - * [Working with files](Foxx/Guides/Files.md) - * [Making requests](Foxx/Guides/MakingRequests.md) - * [Access from the browser](Foxx/Guides/Browser.md) - * [Working with 2.x services](Foxx/Guides/LegacyMode.md) - * [Reference](Foxx/Reference/README.md) - * [Service manifest](Foxx/Reference/Manifest.md) - * [Service context](Foxx/Reference/Context.md) - * [Configuration](Foxx/Reference/Configuration.md) - * [Routers](Foxx/Reference/Routers/README.md) - * [Endpoints](Foxx/Reference/Routers/Endpoints.md) - * [Middleware](Foxx/Reference/Routers/Middleware.md) - * [Request](Foxx/Reference/Routers/Request.md) - * [Response](Foxx/Reference/Routers/Response.md) - * [Sessions middleware](Foxx/Reference/Sessions/README.md) - * [Session storages](Foxx/Reference/Sessions/Storages/README.md) - * [Collection storage](Foxx/Reference/Sessions/Storages/Collection.md) - * [JWT storage](Foxx/Reference/Sessions/Storages/JWT.md) - * [Session transports](Foxx/Reference/Sessions/Transports/README.md) - * [Cookie transport](Foxx/Reference/Sessions/Transports/Cookie.md) - * [Header transport](Foxx/Reference/Sessions/Transports/Header.md) - * [Related modules](Foxx/Reference/Modules/README.md) - * [Authentication](Foxx/Reference/Modules/Auth.md) - * [OAuth 1.0a](Foxx/Reference/Modules/OAuth1.md) - * [OAuth 2.0](Foxx/Reference/Modules/OAuth2.md) - * [GraphQL](Foxx/Reference/Modules/GraphQL.md) - * [Queues](Foxx/Reference/Modules/Queues.md) - * [Deployment](Foxx/Deployment.md) - * [Migrating 2.x services](Foxx/Migrating2x/README.md) - * [Migrating from pre-2.8](Foxx/Migrating2x/Wayback.md) - * [manifest.json](Foxx/Migrating2x/Manifest.md) - * [applicationContext](Foxx/Migrating2x/Context.md) - * [Repositories and Models](Foxx/Migrating2x/Repositories.md) - * [Controllers](Foxx/Migrating2x/Controllers/README.md) - * [Request context](Foxx/Migrating2x/Controllers/Endpoints.md) - * [Error handling](Foxx/Migrating2x/Controllers/Errors.md) - * [Before/After/Around](Foxx/Migrating2x/Controllers/Middleware.md) - * [Request object](Foxx/Migrating2x/Controllers/Request.md) - * [Response object](Foxx/Migrating2x/Controllers/Response.md) - * [Dependency Injection](Foxx/Migrating2x/Controllers/IoC.md) - * [Sessions](Foxx/Migrating2x/Sessions.md) - * [Auth and OAuth2](Foxx/Migrating2x/Auth.md) - * [Foxx Queries](Foxx/Migrating2x/Queries.md) -* [Satellite Collections](Satellites.md) -* [Smart Joins](SmartJoins.md) - -## OPERATIONS - -* [Installation](Installation/README.md) - * [Linux](Installation/Linux.md) - * [Operating System Configuration](Installation/LinuxOSConfiguration.md) - * [Linux OS Tuning Script 
Examples](Installation/LinuxOSTuningScripts.md) - * [macOS](Installation/MacOSX.md) - * [Windows](Installation/Windows.md) - * [Compiling](Installation/Compiling.md) -* [Uninstallation](Uninstallation/README.md) -* [Deployment](Deployment/README.md) - * [By ArangoDB Deployment Modes](Deployment/Modes.md) - * [Single Instance](Deployment/SingleInstance/README.md) - * [Using the ArangoDB Starter](Deployment/SingleInstance/UsingTheStarter.md) - * [Manual Start](Deployment/SingleInstance/ManualStart.md) - * [Master/Slave](Deployment/MasterSlave/README.md) - * [Manual Start](Deployment/MasterSlave/ManualStart.md) - * [Active Failover](Deployment/ActiveFailover/README.md) - * [Using the ArangoDB Starter](Deployment/ActiveFailover/UsingTheStarter.md) - * [Manual Start](Deployment/ActiveFailover/ManualStart.md) - * [Cluster](Deployment/Cluster/README.md) - * [Preliminary Information](Deployment/Cluster/PreliminaryInformation.md) - * [Using the ArangoDB Starter](Deployment/Cluster/UsingTheStarter.md) - * [Manual Start](Deployment/Cluster/ManualStart.md) - * [Kubernetes](Deployment/Cluster/Kubernetes.md) - * [Mesos, DC/OS](Deployment/Cluster/Mesos.md) - - * [Multiple Datacenters](Deployment/DC2DC/README.md) - * [Cluster](Deployment/DC2DC/Cluster.md) - * [ArangoSync Master](Deployment/DC2DC/ArangoSyncMaster.md) - * [ArangoSync Workers](Deployment/DC2DC/ArangoSyncWorkers.md) - * [Prometheus & Grafana](Deployment/DC2DC/PrometheusGrafana.md) - * [Kafka & Zookeeper](Deployment/DC2DC/KafkaZookeeper.md) - * [Standalone Agency](Deployment/StandaloneAgency/README.md) - * [By Technology](Deployment/Technology.md) - * [Manually](Deployment/Manually/README.md) - * [ArangoDB Starter](Deployment/ArangoDBStarter/README.md) - * [Docker](Deployment/Docker/README.md) - - * [Kubernetes](Deployment/Kubernetes/README.md) - * [Using the Operator](Deployment/Kubernetes/Usage.md) - * [Dashboards](Deployment/Kubernetes/Dashboards.md) - * [Deployment Resource Reference](Deployment/Kubernetes/DeploymentResource.md) - * [Driver Configuration](Deployment/Kubernetes/DriverConfiguration.md) - * [Helm](Deployment/Kubernetes/Helm.md) - * [Authentication](Deployment/Kubernetes/Authentication.md) - * [Scaling](Deployment/Kubernetes/Scaling.md) - * [Draining Nodes](Deployment/Kubernetes/Drain.md) - * [Upgrading](Deployment/Kubernetes/Upgrading.md) - * [ArangoDB Configuration & Secrets](Deployment/Kubernetes/ConfigAndSecrets.md) - * [Metrics](Deployment/Kubernetes/Metrics.md) - * [Services & Load balancer](Deployment/Kubernetes/ServicesAndLoadBalancer.md) - * [Deployment Replication Resource Reference](Deployment/Kubernetes/DeploymentReplicationResource.md) - * [Storage](Deployment/Kubernetes/Storage.md) - * [Storage Resource](Deployment/Kubernetes/StorageResource.md) - * [TLS](Deployment/Kubernetes/Tls.md) - * [Troubleshooting](Deployment/Kubernetes/Troubleshooting.md) - * [Mesos, DC/OS](Deployment/DCOS/README.md) - * [Cluster Deployments](Deployment/DCOS/ClusterMesos.md) - * [Choosing Container Engine](Deployment/DCOS/MesosContainers.md) - * [In the Cloud](Deployment/Cloud/README.md) - * [AWS](Deployment/Cloud/AWS.md) - * [Azure](Deployment/Cloud/Azure.md) - * [Production Checklist](Deployment/ProductionChecklist.md) - * [Migrating Single Instance to Cluster](Deployment/MigratingSingleInstanceCluster.md) -* [Backup & Restore](BackupRestore/README.md) -* [Upgrading](Upgrading/README.md) - * [General Upgrade Information](Upgrading/GeneralInfo/README.md) - * [Community to Enterprise Upgrade](Upgrading/CommunityToEnterprise.md) - * 
[OS-specific Information](Upgrading/OSSpecificInfo/README.md) - * [Upgrading on Linux](Upgrading/OSSpecificInfo/Linux.md) - * [Upgrading on macOS](Upgrading/OSSpecificInfo/MacOS.md) - * [Upgrading on Windows](Upgrading/OSSpecificInfo/Windows.md) - * [Upgrading _Starter_ Deployments](Upgrading/Starter/README.md) - * [Upgrading Manual Deployments](Upgrading/Manually/README.md) - * [Upgrading an Active Failover deployment](Upgrading/Manually/ActiveFailover.md) - * [Upgrading a Cluster](Upgrading/Manually/Cluster.md) - * [Upgrading Kubernetes Deployments](Upgrading/Kubernetes/README.md) - * [Version Specific Upgrade Information](Upgrading/VersionSpecific/README.md) - * [Upgrading to 3.4](Upgrading/VersionSpecific/Upgrading34.md) - * [Upgrading to 3.3](Upgrading/VersionSpecific/Upgrading33.md) - * [Upgrading to 3.2](Upgrading/VersionSpecific/Upgrading32.md) - * [Upgrading to 3.1](Upgrading/VersionSpecific/Upgrading31.md) - * [Upgrading to 3.0](Upgrading/VersionSpecific/Upgrading30.md) - * [Upgrading to 2.8](Upgrading/VersionSpecific/Upgrading28.md) - * [Upgrading to 2.6](Upgrading/VersionSpecific/Upgrading26.md) - * [Upgrading to 2.5](Upgrading/VersionSpecific/Upgrading25.md) - * [Upgrading to 2.4](Upgrading/VersionSpecific/Upgrading24.md) - * [Upgrading to 2.3](Upgrading/VersionSpecific/Upgrading23.md) - * [Upgrading to 2.2](Upgrading/VersionSpecific/Upgrading22.md) -* [Downgrading](Downgrading/README.md) -* [Scaling](Scaling/README.md) -* [Administration](Administration/README.md) - * [Configuration](Administration/Configuration/README.md) - * [Import & Export](Administration/ImportExport.md) - * [User Management](Administration/ManagingUsers/README.md) - * [In Arangosh](Administration/ManagingUsers/InArangosh.md) - * [Switch Storage Engine](Administration/Engine/SwitchEngine.md) - * [Master/Slave](Administration/MasterSlave/README.md) - * [Setting up](Administration/MasterSlave/SettingUp.md) - * [Replication Applier](Administration/MasterSlave/ReplicationApplier.md) - * [Per-Database Setup](Administration/MasterSlave/DatabaseSetup.md) - * [Server-Level Setup](Administration/MasterSlave/ServerLevelSetup.md) - * [Syncing Collections](Administration/MasterSlave/SyncingCollections.md) - * [Active Failover](Administration/ActiveFailover/README.md) - * [Cluster](Administration/Cluster/README.md) - - * [Datacenter to datacenter replication](Administration/DC2DC/README.md) - - * [ArangoDB Starter Administration](Administration/Starter/README.md) - * [Removal Procedure](Administration/Starter/Removal.md) - * [Recovery Procedure](Administration/Starter/Recovery.md) -* [Security](Security/README.md) - * [Security Options](Security/SecurityOptions.md) - * [Change Root Password](Security/ChangeRootPassword.md) - * [Encryption at Rest](Security/Encryption/README.md) - * [Auditing](Security/Auditing/README.md) - * [Configuration](Security/Auditing/AuditConfiguration.md) - * [Events](Security/Auditing/AuditEvents.md) - - * [Securing Starter Deployments](Security/Starter/README.md) - - * [Datacenter to datacenter replication](Security/DC2DC/README.md) -* [Monitoring](Monitoring/README.md) - * [Log Levels](Monitoring/LogLevels.md) - - * [Datacenter to datacenter replication](Monitoring/DC2DC/README.md) -* [Troubleshooting](Troubleshooting/README.md) - * [arangod](Troubleshooting/Arangod.md) - * [Emergency Console](Troubleshooting/EmergencyConsole.md) - * [Cluster](Troubleshooting/Cluster/README.md) - * [Agency Dump](Troubleshooting/Cluster/AgencyDump.md) - - * [Datacenter to datacenter 
replication](Troubleshooting/DC2DC/README.md) - ---- - -* [Release Notes](ReleaseNotes/README.md) - * [Version 3.5](ReleaseNotes/35.md) - * [What's New in 3.5](ReleaseNotes/NewFeatures35.md) - * [Known Issues in 3.5](ReleaseNotes/KnownIssues35.md) - * [Incompatible changes in 3.5](ReleaseNotes/UpgradingChanges35.md) - * [Version 3.4](ReleaseNotes/34.md) - * [What's New in 3.4](ReleaseNotes/NewFeatures34.md) - * [Known Issues in 3.4](ReleaseNotes/KnownIssues34.md) - * [Incompatible changes in 3.4](ReleaseNotes/UpgradingChanges34.md) - * [Version 3.3](ReleaseNotes/33.md) - * [What's New in 3.3](ReleaseNotes/NewFeatures33.md) - * [Known Issues in 3.3](ReleaseNotes/KnownIssues33.md) - * [Incompatible changes in 3.3](ReleaseNotes/UpgradingChanges33.md) - * [Version 3.2](ReleaseNotes/32.md) - * [What's New in 3.2](ReleaseNotes/NewFeatures32.md) - * [Known Issues in 3.2](ReleaseNotes/KnownIssues32.md) - * [Incompatible changes in 3.2](ReleaseNotes/UpgradingChanges32.md) - * [Version 3.1](ReleaseNotes/31.md) - * [What's New in 3.1](ReleaseNotes/NewFeatures31.md) - * [Incompatible changes in 3.1](ReleaseNotes/UpgradingChanges31.md) - * [Version 3.0](ReleaseNotes/30.md) - * [What's New in 3.0](ReleaseNotes/NewFeatures30.md) - * [Incompatible changes in 3.0](ReleaseNotes/UpgradingChanges30.md) - * [Version 2.8](ReleaseNotes/28.md) - * [What's New in 2.8](ReleaseNotes/NewFeatures28.md) - * [Incompatible changes in 2.8](ReleaseNotes/UpgradingChanges28.md) - * [Version 2.7](ReleaseNotes/27.md) - * [What's New in 2.7](ReleaseNotes/NewFeatures27.md) - * [Incompatible changes in 2.7](ReleaseNotes/UpgradingChanges27.md) - * [Version 2.6](ReleaseNotes/26.md) - * [What's New in 2.6](ReleaseNotes/NewFeatures26.md) - * [Incompatible changes in 2.6](ReleaseNotes/UpgradingChanges26.md) - * [Version 2.5](ReleaseNotes/25.md) - * [What's New in 2.5](ReleaseNotes/NewFeatures25.md) - * [Incompatible changes in 2.5](ReleaseNotes/UpgradingChanges25.md) - * [Version 2.4](ReleaseNotes/24.md) - * [What's New in 2.4](ReleaseNotes/NewFeatures24.md) - * [Incompatible changes in 2.4](ReleaseNotes/UpgradingChanges24.md) - * [Version 2.3](ReleaseNotes/23.md) - * [What's New in 2.3](ReleaseNotes/NewFeatures23.md) - * [Incompatible changes in 2.3](ReleaseNotes/UpgradingChanges23.md) - * [Version 2.2](ReleaseNotes/22.md) - * [What's New in 2.2](ReleaseNotes/NewFeatures22.md) - * [Version 2.1](ReleaseNotes/21.md) - * [What's New in 2.1](ReleaseNotes/NewFeatures21.md) -* [Appendix](Appendix/README.md) - * [References](Appendix/References/README.md) - * [db](Appendix/References/DBObject.md) - * [collection](Appendix/References/CollectionObject.md) - * [cursor](Appendix/References/CursorObject.md) - * [JavaScript Modules](Appendix/JavaScriptModules/README.md) - * [@arangodb](Appendix/JavaScriptModules/ArangoDB.md) - * [console](Appendix/JavaScriptModules/Console.md) - * [crypto](Appendix/JavaScriptModules/Crypto.md) - * [fs](Appendix/JavaScriptModules/FileSystem.md) - * [request](Appendix/JavaScriptModules/Request.md) - * [actions](Appendix/JavaScriptModules/Actions.md) - * [queries](Appendix/JavaScriptModules/Queries.md) - * [Write-ahead log](Appendix/JavaScriptModules/WAL.md) - * [Task Management](Appendix/JavaScriptModules/Tasks.md) - * [Deprecated](Appendix/Deprecated/README.md) - * [Simple Queries](Appendix/Deprecated/SimpleQueries/README.md) - * [Pagination](Appendix/Deprecated/SimpleQueries/Pagination.md) - * [Modification Queries](Appendix/Deprecated/SimpleQueries/ModificationQueries.md) - * [Geo 
Queries](Appendix/Deprecated/SimpleQueries/GeoQueries.md) - * [Fulltext Queries](Appendix/Deprecated/SimpleQueries/FulltextQueries.md) - * [Error codes and meanings](Appendix/ErrorCodes.md) - * [Glossary](Appendix/Glossary.md) diff --git a/Documentation/Books/Manual/Satellites.md b/Documentation/Books/Manual/Satellites.md deleted file mode 100644 index b4ed15b3aab3..000000000000 --- a/Documentation/Books/Manual/Satellites.md +++ /dev/null @@ -1,140 +0,0 @@ -Satellite Collections -===================== - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -When doing joins in an ArangoDB cluster data has to be exchanged between different servers. - -Joins will be executed on a coordinator. It will prepare an execution plan -and execute it. When executing, the coordinator will contact all shards of the -starting point of the join and ask for their data. The database servers carrying -out this operation will load all their local data and then ask the cluster for -the other part of the join. This again will be distributed to all involved shards -of this join part. - -In sum this results in much network traffic and slow results depending of the -amount of data that has to be sent throughout the cluster. - -Satellite collections are collections that are intended to address this issue. - -They will facilitate the synchronous replication and replicate all its data -to all database servers that are part of the cluster. - -This enables the database servers to execute that part of any join locally. - -This greatly improves performance for such joins at the costs of increased -storage requirements and poorer write performance on this data. - -To create a satellite collection set the *replicationFactor* of this collection -to "satellite". - -Using arangosh: - - arangosh> db._create("satellite", {"replicationFactor": "satellite"}); - -A full example --------------- - - arangosh> var explain = require("@arangodb/aql/explainer").explain - arangosh> db._create("satellite", {"replicationFactor": "satellite"}) - arangosh> db._create("nonsatellite", {numberOfShards: 8}) - arangosh> db._create("nonsatellite2", {numberOfShards: 8}) - -Let's analyse a normal join not involving satellite collections: - -``` -arangosh> explain("FOR doc in nonsatellite FOR doc2 in nonsatellite2 RETURN 1") - -Query string: - FOR doc in nonsatellite FOR doc2 in nonsatellite2 RETURN 1 - -Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 4 CalculationNode DBS 1 - LET #2 = 1 /* json expression */ /* const assignment */ - 2 EnumerateCollectionNode DBS 0 - FOR doc IN nonsatellite /* full collection scan */ - 12 RemoteNode COOR 0 - REMOTE - 13 GatherNode COOR 0 - GATHER - 6 ScatterNode COOR 0 - SCATTER - 7 RemoteNode DBS 0 - REMOTE - 3 EnumerateCollectionNode DBS 0 - FOR doc2 IN nonsatellite2 /* full collection scan */ - 8 RemoteNode COOR 0 - REMOTE - 9 GatherNode COOR 0 - GATHER - 5 ReturnNode COOR 0 - RETURN #2 - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 scatter-in-cluster - 3 remove-unnecessary-remote-scatter -``` - -All shards involved querying the `nonsatellite` collection will fan out via the -coordinator to the shards of `nonsatellite`. In sum 8 shards will open 8 connections -to the coordinator asking for the results of the `nonsatellite2` join. The coordinator -will fan out to the 8 shards of `nonsatellite2`. 
So there will be quite some -network traffic. - -Let's now have a look at the same using satellite collections: - -``` -arangosh> db._query("FOR doc in nonsatellite FOR doc2 in satellite RETURN 1") - -Query string: - FOR doc in nonsatellite FOR doc2 in satellite RETURN 1 - -Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 4 CalculationNode DBS 1 - LET #2 = 1 /* json expression */ /* const assignment */ - 2 EnumerateCollectionNode DBS 0 - FOR doc IN nonsatellite /* full collection scan */ - 3 EnumerateCollectionNode DBS 0 - FOR doc2 IN satellite /* full collection scan, satellite */ - 8 RemoteNode COOR 0 - REMOTE - 9 GatherNode COOR 0 - GATHER - 5 ReturnNode COOR 0 - RETURN #2 - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 scatter-in-cluster - 3 remove-unnecessary-remote-scatter - 4 remove-satellite-joins -``` - -In this scenario all shards of nonsatellite will be contacted. However -as the join is a satellite join all shards can do the join locally -as the data is replicated to all servers reducing the network overhead -dramatically. - -Caveats -------- - -The cluster will automatically keep all satellite collections on all servers in sync -by facilitating the synchronous replication. This means that write will be executed -on the leader only and this server will coordinate replication to the followers. -If a follower doesn't answer in time (due to network problems, temporary shutdown etc.) -it may be removed as a follower. This is being reported to the Agency. - -The follower (once back in business) will then periodically check the Agency and know -that it is out of sync. It will then automatically catch up. This may take a while -depending on how much data has to be synced. When doing a join involving the satellite -you can specify how long the DBServer is allowed to wait for sync until the query -is being aborted. - -Check [Accessing Cursors](../HTTP/AqlQueryCursor/AccessingCursors.html) -for details. - -During network failure there is also a minimal chance that a query was properly -distributed to the DBServers but that a previous satellite write could not be -replicated to a follower and the leader dropped the follower. The follower however -only checks every few seconds if it is really in sync so it might indeed deliver -stale results. diff --git a/Documentation/Books/Manual/Scaling/README.md b/Documentation/Books/Manual/Scaling/README.md deleted file mode 100644 index 879f4ae158e2..000000000000 --- a/Documentation/Books/Manual/Scaling/README.md +++ /dev/null @@ -1,31 +0,0 @@ -Scaling ArangoDB -================ - -ArangoDB is a distributed database supporting multiple data models, -and can thus be scaled horizontally, that is, by using many servers, -typically based on commodity hardware. This approach not only delivers -performance as well as capacity improvements, but also achieves -resilience by means of replication and automatic fail-over. Furthermore, -one can build systems that scale their capacity dynamically up and down -automatically according to demand. - -One can also scale ArangoDB vertically, that is, by using -ever larger servers. There is no built in limitation in ArangoDB, -for example, the server will automatically use more threads if -more CPUs are present. - -However, scaling vertically has the disadvantage that the -costs grow faster than linear with the size of the server, and -none of the resilience and dynamical capabilities can be achieved -in this way. 
- -Options -------- - -Several options are available to scale ArangoDB, each of them has its own pros -and cons: - -- [Master/Slave](../Architecture/DeploymentModes/MasterSlave/README.md) -- [Active Failover](../Architecture/DeploymentModes/ActiveFailover/README.md) -- [Cluster](../Architecture/DeploymentModes/Cluster/README.md) -- [Multiple Datacenters](../Architecture/DeploymentModes/DC2DC/README.md) diff --git a/Documentation/Books/Manual/Security/Auditing/AuditConfiguration.md b/Documentation/Books/Manual/Security/Auditing/AuditConfiguration.md deleted file mode 100644 index 7e935d04c4f4..000000000000 --- a/Documentation/Books/Manual/Security/Auditing/AuditConfiguration.md +++ /dev/null @@ -1,42 +0,0 @@ -Audit Configuration -=================== - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -Output ------- - -`--audit.output output` - -Specifies the target of the audit log. Possible values are - -`file://filename` where *filename* can be relative or absolute. - -`syslog://facility` or `syslog://facility/application-name` to log -into a syslog server. - -The option can be specified multiple times in order to configure the -output for multiple targets. - -Hostname --------- - -`--audit.hostname name` - -The name of the server used in audit log messages. By default the -system hostname is used. - -Verbosity ---------- - -`--log.level topic=level` - -By default, the server will log all audit events. Some low-priority events, such -as statistics operations, are logged with the `debug` log level. To keep such -events from cluttering the log, set the appropriate topic to `info`. All other -messages will be logged at the `info` level. Audit topics include -`audit-authentication`, `audit-authorization`, `audit-collection`, -`audit-database`, `audit-document`, `audit-service`, and `audit-view`. diff --git a/Documentation/Books/Manual/Security/Auditing/AuditEvents.md b/Documentation/Books/Manual/Security/Auditing/AuditEvents.md deleted file mode 100644 index 82b930ce5d9d..000000000000 --- a/Documentation/Books/Manual/Security/Auditing/AuditEvents.md +++ /dev/null @@ -1,155 +0,0 @@ -Audit Events -============ - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -Authentication --------------- - -### Unknown authentication methods - -``` -2016-10-03 15:44:23 | server1 | audit-authentication | n/a | database1 | 127.0.0.1:61525 | n/a | unknown authentication method | /_api/version -``` - -### Missing credentials - -``` -2016-10-03 15:39:49 | server1 | audit-authentication | n/a | database1 | 127.0.0.1:61498 | n/a | credentials missing | /_api/version -``` - -### Wrong credentials - -``` -2016-10-03 15:47:26 | server1 | audit-authentication | n/a | database1 | 127.0.0.1:61528 | http basic | credentials wrong | /_api/version -``` - -Please note, that the user given as fourth part is the user that requested -the login. In general it will be unavailable. - -### JWT login succeeded - -``` -2016-10-03 17:21:22 | server1 | audit-authentication | root | database1 | 127.0.0.1:64214 | http jwt | user 'root' authenticated | /_open/auth -``` - -Please note, that the user given as fourth part is the user that requested -the login. 
- -### JWT login failed - -``` -2016-10-03 17:21:22 | server1 | audit-authentication | root | database1 | 127.0.0.1:64214 | http jwt | user 'root' wrong credentials | /_open/auth -``` - -Please note, that the user given as fourth part is the user that requested -the login. - -Authorization -------------- - -### User not authorized to access database - -``` -2016-10-03 16:20:52 | server1 | audit-authorization | user1 | database2 | 127.0.0.1:62262 | http basic | not authorized | /_api/version -``` - -Databases ---------- - -### Create a database - -``` -2016-10-04 15:33:25 | server1 | audit-database | user1 | database1 | 127.0.0.1:56920 | http basic | create database 'database1' | ok | /_api/database -``` - -### Drop a database - -``` -2016-10-04 15:33:25 | server1 | audit-database | user1 | database1 | 127.0.0.1:56920 | http basic | delete database 'database1' | ok | /_api/database -``` - -Collections ------------ - -### Create a collection - -``` -2016-10-05 17:35:57 | server1 | audit-collection | user1 | database1 | 127.0.0.1:51294 | http basic | create collection 'collection1' | ok | /_api/collection -``` - -### Truncate a collection - -``` -2016-10-05 17:36:08 | server1 | audit-collection | user1 | database1 | 127.0.0.1:51294 | http basic | truncate collection 'collection1' | ok | /_api/collection/collection1/truncate -``` - -### Drop a collection - -``` -2016-10-05 17:36:30 | server1 | audit-collection | user1 | database1 | 127.0.0.1:51294 | http basic | delete collection 'collection1' | ok | /_api/collection/collection1 -``` - -Indexes -------- - -### Create a index - -``` -2016-10-05 18:19:40 | server1 | audit-collection | user1 | database1 | 127.0.0.1:52467 | http basic | create index in 'collection1' | ok | {"fields":["a"],"sparse":false,"type":"skiplist","unique":false} | /_api/index?collection=collection1 -``` - -### Drop a index - -``` -2016-10-05 18:18:28 | server1 | audit-collection | user1 | database1 | 127.0.0.1:52464 | http basic | drop index 'collection1/44051' | ok | /_api/index/collection1/44051 -``` - -Documents ---------- - -### Reading a single document - -``` -2016-10-04 12:27:55 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | read document in 'collection1' | ok | /_api/document/collection1 -``` - -### Creating a single document - -``` -2016-10-04 12:27:55 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | create document in 'collection1' | ok | /_api/document/collection1 -``` - -### Replacing a single document - -``` -2016-10-04 12:28:08 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | replace document 'collection1/21456' | ok | /_api/document/collection1/21456?ignoreRevs=false -``` - -### Modifying a single document - -``` -2016-10-04 12:28:15 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | modify document 'collection1/21456' | ok | /_api/document/collection1/21456?keepNull=true&ignoreRevs=false -``` - -### Deleting a single document - -``` -2016-10-04 12:28:23 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | delete document 'collection1/21456' | ok | /_api/document/collection1/21456?ignoreRevs=false -``` - -For example, if someones tries to delete a non-existing document, it will be logged as - -``` -2016-10-04 12:28:26 | server1 | audit-document | user1 | database1 | 127.0.0.1:53699 | http basic | delete document 'collection/21456' | failed | /_api/document/collection1/21456?ignoreRevs=false -``` - -Queries 
-------- - -``` -2016-10-06 12:12:10 | server1 | audit-document | user1 | database1 | 127.0.0.1:54232 | http basic | query document | ok | for i in collection1 return i | /_api/cursor -``` diff --git a/Documentation/Books/Manual/Security/Auditing/README.md b/Documentation/Books/Manual/Security/Auditing/README.md deleted file mode 100644 index 0c03d9a2115d..000000000000 --- a/Documentation/Books/Manual/Security/Auditing/README.md +++ /dev/null @@ -1,32 +0,0 @@ -Auditing -======== - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -Auditing allows you to monitor access to the database in detail. In general -audit logs are of the form - -``` -2016-01-01 12:00:00 | server | username | database | client-ip | authentication | text1 | text2 | ... -``` - -The *time-stamp* is in GMT. This allows to easily match log entries from servers -in different time zones. - -The name of the *server*. You can specify a custom name on startup. Otherwise -the default hostname is used. - -The *username* is the (authenticated or unauthenticated) name supplied by the -client. A dash `-` is printed if no name was given by the client. - -The *database* describes the database that was accessed. Please note that there -are no database crossing queries. Each access is restricted to one database. - -The *client-ip* describes the source of the request. - -The *authentication* details the methods used to authenticate the user. - -Details about the requests follow in the additional fields. diff --git a/Documentation/Books/Manual/Security/ChangeRootPassword.md b/Documentation/Books/Manual/Security/ChangeRootPassword.md deleted file mode 100644 index b0b0b5ecfc12..000000000000 --- a/Documentation/Books/Manual/Security/ChangeRootPassword.md +++ /dev/null @@ -1,31 +0,0 @@ -# How-to Reset Root Password - -One can reset the _root_ password in the following way: - -- Stop the Server -- Set `authentication=false` in the `arangod.conf` file -- Restart the Server - - **Note:** you might need to take any needed precaution to avoid this server can be accessed from outside as currently authentication is temporarily disabled. You might do this by disabling network access or using _localhost_ for the binding (`--server.endpoint tcp://127.0.0.1:8529`) -- Change the password using the ArangoDB Web UI, or using the following command via `Arangosh`: - -``` -require("org/arangodb/users").update("root", "newpassword"); -``` - -This command should return: - -``` -{ - "user" : "root", - "active" : true, - "extra" : { - }, - "code" : 200 -} -``` - -- Set `authentication=true` in the `arangod.conf` file -- Restart the server -- Test the connection - -Please note that the above procedure is meant for _Single Instance_. If you are using an _ArangoDB Cluster_ or _Active Failover_ you should disable and enable authentication in the `arangod.conf` file of each node. Changes to the `arangod.conf` file under the path `etc/arangodb3/arangod.conf` in _Cluster_ and _Active Failover_ deployments will not work in this case. 
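The same reset procedure can also be scripted. The following is only a sketch of the steps described above, assuming a single instance and using the command-line option `--server.authentication false` as the equivalent of setting `authentication=false` in `arangod.conf`:

```bash
# 1. Stop the server, then restart it with authentication disabled and
#    bound to localhost only, so it cannot be reached from the outside
arangod --server.authentication false \
        --server.endpoint tcp://127.0.0.1:8529

# 2. From another shell, set the new root password
arangosh --server.endpoint tcp://127.0.0.1:8529 \
         --server.password "" \
         --javascript.execute-string \
         'require("org/arangodb/users").update("root", "newpassword");'

# 3. Stop the server again and restart it with authentication enabled,
#    then test the connection with the new password
arangod --server.authentication true
```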
diff --git a/Documentation/Books/Manual/Security/DC2DC/README.md b/Documentation/Books/Manual/Security/DC2DC/README.md deleted file mode 100644 index ab12d1f11aab..000000000000 --- a/Documentation/Books/Manual/Security/DC2DC/README.md +++ /dev/null @@ -1,203 +0,0 @@ - -# Datacenter to datacenter Security - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -This section includes information related to the _datacenter to datacenter replication_ -security. - -For a general introduction to the _datacenter to datacenter replication_, please -refer to the [Datacenter to datacenter replication](../../Architecture/DeploymentModes/DC2DC/README.md) -chapter. - -## Firewall settings - -The components of _ArangoSync_ use (TCP) network connections to communicate with each other. -Below you'll find an overview of these connections and the TCP ports that should be accessible. - -1. The sync masters must be allowed to connect to the following components - within the same datacenter: - - - ArangoDB agents and coordinators (default ports: `8531` and `8529`) - - Kafka brokers (only when using `kafka` type message queue) (default port `9092`) - - Sync workers (default port `8729`) - - Additionally the sync masters must be allowed to connect to the sync masters in the other datacenter. - - By default the sync masters will operate on port `8629`. - -1. The sync workers must be allowed to connect to the following components within the same datacenter: - - - ArangoDB coordinators (default port `8529`) - - Kafka brokers (only when using `kafka` type message queue) (default port `9092`) - - Sync masters (default port `8629`) - - By default the sync workers will operate on port `8729`. - - Additionally (when using `kafka` type message queue) the sync workers must be allowed to - connect to the Kafka brokers in the other datacenter. - -1. Kafka (when using `kafka` type message queue) - - The kafka brokers must be allowed to connect to the following components within the same datacenter: - - - Other kafka brokers (default port `9092`) - - Zookeeper (default ports `2181`, `2888` and `3888`) - - The default port for kafka is `9092`. The default kafka installation will also expose some prometheus - metrics on port `7071`. To gain more insight into kafka open this port for your prometheus - installation. - -1. Zookeeper (when using `kafka` type message queue) - - The zookeeper agents must be allowed to connect to the following components within the same datacenter: - - - Other zookeeper agents - - The setup here is a bit special as zookeeper uses 3 ports for different operations. All agents need to - be able to connect to all of these ports. - - By default Zookeeper uses: - - - port `2181` for client communication - - port `2888` for follower communication - - port `3888` for leader elections - -## Certificates - -Digital certificates are used in many places in _ArangoSync_ for both encryption -and authentication. - -
In ArangoSync all network connections use Transport Layer Security (TLS), a set of protocols that ensures that all network traffic is encrypted. For this, TLS certificates are used. The server side of the network connection offers a TLS certificate. This certificate is (often) verified by the client side of the network connection, to ensure that the certificate is signed by a trusted Certificate Authority (CA). This ensures the integrity of the server.
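As a side note (not part of the ArangoSync tooling itself), the relationship between a server certificate and the CA that signed it can be inspected with standard OpenSSL commands. The file names `my-tls-ca.crt` and `my-tls-cert.crt` refer to the certificates created further below and are used here purely as an example:

```bash
# Show subject, issuer and validity period of the server certificate
openssl x509 -in my-tls-cert.crt -noout -subject -issuer -dates

# Check that the server certificate is signed by the trusted CA
openssl verify -CAfile my-tls-ca.crt my-tls-cert.crt
```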
In several places additional certificates are used for authentication. In those cases the client side of the connection offers a client certificate (on top of an existing TLS connection). The server side of the connection uses the client certificate to authenticate the client and (optionally) decides which rights should be assigned to the client.

Note: ArangoSync does allow the use of certificates signed by a well-known CA (e.g. Verisign), however it is more convenient (and common) to use your own CA.

### Formats

All certificates are x509 certificates with a public key, a private key and an optional chain of certificates used to sign the certificate. This chain is typically provided by the Certificate Authority (CA).

Depending on their use, certificates are stored in different formats.

The following formats are used:

- Public key only (`.crt`): A file that contains only the public key of a certificate with an optional chain of parent certificates (public keys of the certificates used to sign the certificate). Since this format contains only public keys, it is not a problem if its contents are exposed. You should still store it in a safe place to avoid losing it.
- Private key only (`.key`): A file that contains only the private key of a certificate.
It is vital to protect these files and store them in a safe place. -- Keyfile with public & private key (`.keyfile`): A file that contains the public key of - a certificate, an optional chain of parent certificates and a private key. -
Since this format also contains a private key, it is vital to protect these files - and store them in a safe place. -- Java keystore (`.jks`): A file containing a set of public and private keys. -
It is possible to protect access to the content of this file using a keystore password. -
Since this format can contain private keys, it is vital to protect these files - and store them in a safe place (even when its content is protected with a keystore password). - -### Creating certificates - -ArangoSync provides commands to create all certificates needed. - -#### TLS server certificates - -To create a certificate used for TLS servers in the **keyfile** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses). -Then run: - -```bash -arangosync create tls keyfile \ - --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \ - --host= \ - --keyfile=my-tls-cert.keyfile -``` - -Make sure to store the generated keyfile (`my-tls-cert.keyfile`) in a safe place. - -To create a certificate used for TLS servers in the **crt** & **key** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses). -Then run: - -```bash -arangosync create tls certificate \ - --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \ - --host= \ - --cert=my-tls-cert.crt \ - --key=my-tls-cert.key \ -``` - -Make sure to protect and store the generated files (`my-tls-cert.crt` & `my-tls-cert.key`) in a safe place. - -#### Client authentication certificates - -To create a certificate used for client authentication in the **keyfile** format, -you need the public key of the CA (`--cacert`), the private key of -the CA (`--cakey`) and one or more hostnames (or IP addresses) or email addresses. -Then run: - -```bash -arangosync create client-auth keyfile \ - --cacert=my-client-auth-ca.crt --cakey=my-client-auth-ca.key \ - [--host= | --email=] \ - --keyfile=my-client-auth-cert.keyfile -``` - -Make sure to protect and store the generated keyfile (`my-client-auth-cert.keyfile`) in a safe place. - -#### CA certificates - -To create a CA certificate used to **sign TLS certificates**, run: - -```bash -arangosync create tls ca \ - --cert=my-tls-ca.crt --key=my-tls-ca.key -``` - -Make sure to protect and store both generated files (`my-tls-ca.crt` & `my-tls-ca.key`) in a safe place. - -To create a CA certificate used to **sign client authentication certificates**, run: - -```bash -arangosync create client-auth ca \ - --cert=my-client-auth-ca.crt --key=my-client-auth-ca.key -``` - -Make sure to protect and store both generated files (`my-client-auth-ca.crt` & `my-client-auth-ca.key`) -in a safe place. -
Note: CA certificates have a much longer lifetime than normal certificates. Therefore even more care is needed to store them safely.

### Renewing certificates

All certificates carry meta information that limits their use in function, target & lifetime.
A certificate created for client authentication (function) cannot be used as a TLS server certificate -(same is true for the reverse). -
A certificate for host `myserver` (target) cannot be used for host `anotherserver`. -
A certificate that is valid until October 2017 (lifetime) cannot be used after October 2017. - -If anything changes in function, target or lifetime you need a new certificate. - -The procedure for creating a renewed certificate is the same as for creating a "first" certificate. -
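For example, a renewed TLS server certificate in the keyfile format is created by re-running the creation command shown above with the same CA files (`<hostname>` is a placeholder for the host names or IP addresses the certificate should cover):

```bash
arangosync create tls keyfile \
    --cacert=my-tls-ca.crt --cakey=my-tls-ca.key \
    --host=<hostname> \
    --keyfile=my-tls-cert.keyfile
```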
After creating the renewed certificate the process(es) using them have to be updated. -This mean restarting them. All ArangoSync components are designed to support stopping and starting -single instances, but do not restart more than 1 instance at the same time. -As soon as 1 instance has been restarted, give it some time to "catch up" before restarting -the next instance. diff --git a/Documentation/Books/Manual/Security/Encryption/README.md b/Documentation/Books/Manual/Security/Encryption/README.md deleted file mode 100644 index 7cab3d8ed887..000000000000 --- a/Documentation/Books/Manual/Security/Encryption/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Encryption at Rest - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -When you store sensitive data in your ArangoDB database, you want -to protect that data under all circumstances. -At runtime you will protect it with SSL transport encryption and strong authentication, -but when the data is already on disk, you also need protection. -That is where the Encryption feature comes in. - -The Encryption feature of ArangoDB will encrypt all data that ArangoDB -is storing in your database before it is written to disk. - -The data is encrypted with AES-256-CTR, which is a strong encryption -algorithm, that is very suitable for multi-processor environments. This means that -your data is safe, but your database is still fast, even under load. - -Most modern CPU's have builtin support for hardware AES encryption, which makes it even faster. - -The encryption feature is supported by all ArangoDB deployment modes. - -## Limitations - -The encryption feature has the following limitations: - -- Encrypting a single collection is not supported: all the databases are - encrypted. -- It is not possible to enable encryption at runtime: if you have existing - data you will need to take a backup first, then enable encryption and - start your server on an empty data-directory, and finally restore your - backup. -- The Encryption feature requires the RocksDB storage engine. - -## Encryption keys - -The encryption feature of ArangoDB requires a single 32-byte key per server. -It is recommended to use a different key for each server (when operating in a cluster configuration). -Make sure to protect these keys! - -That means: - -- Do not write them to persistent disks or your server(s), always store them on an in-memory (`tmpfs`) filesystem. -- Transport your keys safely to your server(s). There are various tools for managing secrets like this (e.g. vaultproject.io). -- Store a copy of your key offline in a safe place. If you lose your key, there is NO way to get your data back. - -## Configuration - -To activate encryption of your database, you need to supply an -encryption key to the server. - -Make sure to pass this option the very first time you start your -database. You cannot encrypt a database that already exists. - -Note: You also have to activate the RocksDB storage engine. - -### Encryption key stored in file - -Pass the following option to `arangod`: - -``` -$ arangod \ - --rocksdb.encryption-keyfile=/mytmpfs/mySecretKey \ - --server.storage-engine=rocksdb -``` - -The file `/mytmpfs/mySecretKey` must contain the encryption key. This -file must be secured, so that only `arangod` can access it. You should -also ensure that in case some-one steals the hardware, he will not be -able to read the file. 
For example, by encryption `/mytmpfs` or -creating a in-memory file-system under `/mytmpfs`. - -### Encryption key generated by a program - -Pass the following option to `arangod`: - -``` -$ arangod \ - --rocksdb.encryption-key-generator=path-to-my-generator \ - --server.storage-engine=rocksdb -``` - -The program `path-to-my-generator` output the encryption on standard -output and exit. - - -## Creating keys - -The encryption keyfile must contain 32 bytes of random data. - -You can create it with a command line this. - -``` -dd if=/dev/random bs=1 count=32 of=yourSecretKeyFile -``` - -For security, it is best to create these keys offline (away from your database servers) and -directly store them in your secret management tool. diff --git a/Documentation/Books/Manual/Security/README.md b/Documentation/Books/Manual/Security/README.md deleted file mode 100644 index cf003bc06f63..000000000000 --- a/Documentation/Books/Manual/Security/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Security - -- [Security Options](SecurityOptions.md) -- [Change Root Password](ChangeRootPassword.md) -- [Encryption at Rest](Encryption/README.md) -- [Auditing](Auditing/README.md) -- [Securing Starter Deployments](Starter/README.md) -- [Datacenter to datacenter Security](DC2DC/README.md) diff --git a/Documentation/Books/Manual/Security/SecurityOptions.md b/Documentation/Books/Manual/Security/SecurityOptions.md deleted file mode 100644 index 9f1f697be941..000000000000 --- a/Documentation/Books/Manual/Security/SecurityOptions.md +++ /dev/null @@ -1,157 +0,0 @@ -# Server security options - -_arangod_ provides a variety of options to make a setup more secure. -Administrators can use these options to limit access to certain ArangoDB -server functionality as well as providing the leakage of information about -the environment that a server is running in. - -## General security options - -The following security options are available: - -- `--server.harden` - If this option is set to `true` and authentication is enabled, non-admin users - will be denied access to the following REST APIs: - - * `/_admin/log` - * `/_admin/log/level` - * `/_admin/status` - * `/_admin/statistics` - * `/_admin/statistics-description` - * `/_api/engine/stats` - - Additionally, no version details will be revealed by the version REST API at - `/_api/version`. - ` - The default value for this option is `false`. - -## JavaScript security options - -`arangod` has several options that allow you to make your installation more -secure when it comes to running application code in it. Below you will find -an overview of the relevant options. - -### Blacklist and whitelists - -Several options exists to restrict JavaScript application code functionality -to just certain allowed subsets. Which subset of functionality is available -can be controlled via blacklisting and whitelisting access to individual -components. Blacklists can be used to disallow access to dedicated functionality, -whereas whitelists can be used to explicitly allow access to certain functionality. - -If an item is covered by both a blacklist and a whitelist, the whitelist will -overrule and access to the functionality will be allowed. - -Values for blacklist and whitelist options need to be specified as ECMAScript -regular expressions. Each option can be used multiple times. In this case, -the individual values for each option will be combined with a _logical or_. - -For example, the following combination of startup options - - --javascript.startup-options-whitelist "^server\." 
- --javascript.startup-options-whitelist "^log\." - --javascript.startup-options-blacklist "^javascript\." - --javascript.startup-options-blacklist "endpoint" - -will resolve internally to the following regular expressions: - -``` ---javascript.startup-options-whitelist = "^server\.|^log\." ---javascript.startup-options-blacklist = "^javascript\.|endpoint" -``` - -Access to directories and files from JavaScript operations is only -controlled via a whitelist, which can be specified via the startup -option `--javascript.files-whitelist`. - -For example, when using the following startup options - - --javascript.startup-options-whitelist "^/etc/required/" - --javascript.startup-options-whitelist "^/etc/mtab/" - -all files in the directories `/etc/required` and `/etc/mtab` plus their -subdirectories will be accessible, while access to files in any other directories -will be disallowed from JavaScript operations, with the following exceptions: - -- ArangoDB's temporary directory: JavaScript code is given access to this - directory for storing temporary files. The temporary directory location - can be specified explicitly via the `--temp.path` option at startup. - If the option is not specified, ArangoDB will automatically use a subdirectory - of the system's temporary directory). -- ArangoDB's own JavaScript code, shipped with the ArangoDB release packages. - Files in this directory and its subdirectories will be readable for JavaScript - code running in ArangoDB. The exact path can be specified by the startup option - `--javascript.startup-directory`. - -### Options for blacklisting and whitelisting - -The following options are available for blacklisting and whitelisting access -to dedicated functionality for application code: - -- `--javascript.startup-options-whitelist` and `--javascript.startup-options-blacklist`: - These options control which startup options will be exposed to JavaScript code, - following above rules for blacklists and whitelists. - -- `--javascript.environment-variables-whitelist` and `--javascript.environment-variables-blacklist`: - These options control which environment variables will be exposed to JavaScript - code, following above rules for blacklists and whitelists. - -- `--javascript.endpoints-whitelist` and `--javascript.endpoints-blacklist`: - These options control which endpoints can be used from within the `@arangodb/request` - JavaScript module. - Endpoint values are passed into the filter in a normalized format starting - with either of the prefixes `tcp://`, `ssl://`, `unix://` or `srv://`. - Note that for HTTP/SSL-based endpoints the port number will be included too, - and that the endpoint can be specified either as an IP address or host name - from application code. - -- `--javascript.files-whitelist`: - This option controls which filesystem paths can be accessed from JavaScript code. - -### Additional JavaScript security options - -In addition to the blacklisting and whitelisting security options, the following -extra options are available for locking down JavaScript access to server functionality: - -- `--javascript.allow-port-testing`: - If set to `true`, this option enables the `testPort` JavaScript function in the - `internal` module. The default value is `false`. 
- -- `--javascript.allow-external-process-control`: - If set to `true`, this option allows the execution and control of external processes - from JavaScript code via the functions from the `internal` module: - - - executeExternal - - executeExternalAndWait - - getExternalSpawned - - killExternal - - suspendExternal - - continueExternal - - statusExternal - -- `--javascript.harden`: - If set to `true`, this setting will deactivate the following JavaScript functions - which may leak information about the environment: - - - `internal.getPid()` - - `internal.logLevel()`. - - The default value is `false`. - -## Security options for managing Foxx applications - -The following options are available for controlling the installation of Foxx applications -in an ArangoDB server: - -- `--foxx.api`: - If set to `false`, this option disables the Foxx management API, which will make it - impossible to install and uninstall Foxx applications. Setting the option to `false` - will also deactivate the "Services" section in the web interface. - The default value is `true`, meaning that Foxx apps can be installed and uninstalled. - -- `--foxx.store`: - If set to `false`, this option disables the Foxx app store in ArangoDB's web interface, - which will also prevent ArangoDB and its web interface from making calls to the main Foxx - application Github repository at https://github.com/arangodb/foxx-apps. - The default value is `true`. - diff --git a/Documentation/Books/Manual/Security/Starter/README.md b/Documentation/Books/Manual/Security/Starter/README.md deleted file mode 100644 index 1af555414bba..000000000000 --- a/Documentation/Books/Manual/Security/Starter/README.md +++ /dev/null @@ -1,35 +0,0 @@ - -Securing Starter Deployments -============================ - -The password that is set for the _root_ user during the installation of the ArangoDB -package has no effect in case of deployments done with the tool _ArangoDB Starter_, -as this tool creates new database directories and configuration files that are -separate from those created by the stand-alone installation. - -Assuming you have enabled authentication in your _Starter_ deployment (using `--auth.jwt-secret=`), by default -the _root_ user will be created with an _empty_ password. - -In order to the change the password of the _root_ user, you can: - -- Open the ArangoDB web UI and change the password from there. [More information](../../Programs/WebInterface/Users.md). -- Open an ArangoSH shell and use the function _users.replace_. [More information](../../Administration/ManagingUsers/InArangosh.md#replace). - -In case you would like to automate the _root_ password change, you might use the -_--javascript.execute-string_ option of the _arangosh_ binary, e.g.: - -```bash -arangosh --server.endpoint your-server-endpoint \ - --server.password "" \ - --javascript.execute-string 'require("org/arangodb/users").update("root", "mypwd");' -``` - -where "mypwd" is the new password you want to set. - -If your _Starter_ deployment has authentication turned off, it is suggested to -turn it on using a _JWT secret_ file. For more information on this topic, please -refer to the _Starter_ [Option](../../Programs/Starter/Options.md#authentication-options) page. - -Note that you cannot easily turn authentication on/off once your deployment -has started for the first time. It is possible to stop all _Starters_ and then -manually modify all the `arangod.conf` files in yor data directory, but this is not recommended. 
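Putting these recommendations together, a new _Starter_ deployment can be brought up with authentication enabled from the very first start and with a non-empty _root_ password. This is only a sketch; it assumes the _Starter_ binary is available as `arangodb` and that storing the JWT secret in a plain file is acceptable for your environment:

```bash
# Generate a random JWT secret and restrict access to it
dd if=/dev/random bs=1 count=32 2>/dev/null | base64 > arangodb.jwtsecret
chmod 600 arangodb.jwtsecret

# Start the deployment with authentication enabled
arangodb --auth.jwt-secret=./arangodb.jwtsecret

# Set a non-empty root password (root starts with an empty password)
arangosh --server.endpoint your-server-endpoint \
         --server.password "" \
         --javascript.execute-string \
         'require("org/arangodb/users").update("root", "mypwd");'
```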
diff --git a/Documentation/Books/Manual/SmartJoins.md b/Documentation/Books/Manual/SmartJoins.md deleted file mode 100644 index 9d02135ab335..000000000000 --- a/Documentation/Books/Manual/SmartJoins.md +++ /dev/null @@ -1,284 +0,0 @@ -Smart Joins -=========== - -Introduced in: v3.4.5, v3.5.0 - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -When doing joins in an ArangoDB cluster, data has to be exchanged between different servers. - -Joins between different collections in a cluster normally require roundtrips between the -shards of these collections for fetching the data. Requests are routed through an extra -coordinator hop. - -For example, with two collections *c1* and *c2* with 4 shards each, the coordinator will -initially contact the 4 shards of *c1*. In order to perform the join, the DBServer nodes -which manage the actual data of *c1* need to pull the data from the other collection, *c2*. -This causes extra roundtrips via the coordinator, which will then pull the data for *c2* -from the responsible shards: - - arangosh> db._explain("FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2._key RETURN doc1"); - - Query String: - FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2._key RETURN doc1 - - Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 3 EnumerateCollectionNode DBS 0 - FOR doc2 IN c2 /* full collection scan, 4 shard(s) */ - 14 RemoteNode COOR 0 - REMOTE - 15 GatherNode COOR 0 - GATHER - 8 ScatterNode COOR 0 - SCATTER - 9 RemoteNode DBS 0 - REMOTE - 7 IndexNode DBS 0 - FOR doc1 IN c1 /* primary index scan, 4 shard(s) */ - 10 RemoteNode COOR 0 - REMOTE - 11 GatherNode COOR 0 - GATHER - 6 ReturnNode COOR 0 - RETURN doc1 - -This is the general query execution, and it makes sense if there is no further -information available about how the data is actually distributed to the individual -shards. It works in case *c1* and *c2* have a different amount of shards, or use -different shard keys or strategies. However, it comes with the additional cost of -having to do 4 x 4 requests to perform the join. - - -Sharding two collections identically using distributeShardsLike ---------------------------------------------------------------- - -In the specific case that the two collections have the same number of shards, the -data of the two collections can be co-located on the same server for the same shard -key values. In this case the extra hop via the coordinator will not be necessary. - -The query optimizer will remove the extra hop for the join in case it can prove -that data for the two collections is co-located. - -The first step is thus to make the two collections shard their data alike. This can -be achieved by making the `distributeShardsLike` attribute of one of the collections -refer to the other collection. - -Here is an example setup for this, using arangosh: - - arangosh> db._create("c1", {numberOfShards: 4, shardKeys: ["_key"]}); - arangosh> db._create("c2", {shardKeys: ["_key"], distributeShardsLike: "c1"}); - -Now the collections *c1* and *c2* will not only have the same shard keys, but they -will also locate their data for the same shard keys values on the same server. - -Let's check how the data actually gets distributed now. 
We first confirm that the -two collections have 4 shards each, which in this example are evenly distributed -across two servers: - - arangosh> db.c1.shards(true) - { - "s2011661" : [ - "PRMR-64d19f43-3aa0-4abb-81f6-4b9966d32175" - ], - "s2011662" : [ - "PRMR-5f30caa0-4c93-4fdd-98f3-a2130c1447df" - ], - "s2011663" : [ - "PRMR-64d19f43-3aa0-4abb-81f6-4b9966d32175" - ], - "s2011664" : [ - "PRMR-5f30caa0-4c93-4fdd-98f3-a2130c1447df" - ] - } - - arangosh> db.c2.shards(true) - { - "s2011666" : [ - "PRMR-64d19f43-3aa0-4abb-81f6-4b9966d32175" - ], - "s2011667" : [ - "PRMR-5f30caa0-4c93-4fdd-98f3-a2130c1447df" - ], - "s2011668" : [ - "PRMR-64d19f43-3aa0-4abb-81f6-4b9966d32175" - ], - "s2011669" : [ - "PRMR-5f30caa0-4c93-4fdd-98f3-a2130c1447df" - ] - } - -Because we have told both collections that distribute their data alike, their -shards will now also be populated alike: - - arangosh> for (i = 0; i < 100; ++i) { - db.c1.insert({ _key: "test" + i }); - db.c2.insert({ _key: "test" + i }); - } - - arangosh> db.c1.count(true); - { - "s2011664" : 22, - "s2011661" : 21, - "s2011663" : 27, - "s2011662" : 30 - } - - arangosh> db.c2.count(true); - { - "s2011669" : 22, - "s2011666" : 21, - "s2011668" : 27, - "s2011667" : 30 - } - -We can see that shard 1 of *c1* ("s2011664") has the same number of documents as -shard 1 of *c2* ("s20116692), that shard 2 of *c1* ("s2011661") has the same -number of documents as shard2 of *c2* ("s2011666") etc. -Additionally, we can see from the shard-to-server distribution above that the -corresponding shards from *c1* and *c2* always reside on the same node. -This is a precondition for running joins locally, and thanks to the effects of -`distributeShardsLike` it is now satisfied! - - -Smart joins using distributeShardsLike --------------------------------------- - -With the two collections in place like this, an AQL query that uses a FILTER condition -that refers from the shard key of the one collection to the shard key of the other collection -and compares the two shard key values by equality is eligible for the query -optimizer's "smart-join" optimization: - - arangosh> db._explain("FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2._key RETURN doc1"); - - Query String: - FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2._key RETURN doc1 - - Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 3 EnumerateCollectionNode DBS 0 - FOR doc2 IN c2 /* full collection scan, 4 shard(s) */ - 7 IndexNode DBS 0 - FOR doc1 IN c1 /* primary index scan, 4 shard(s) */ - 10 RemoteNode COOR 0 - REMOTE - 11 GatherNode COOR 0 - GATHER - 6 ReturnNode COOR 0 - RETURN doc1 - -As can be seen above, the extra hop via the coordinator is gone here, which will mean -less cluster-internal traffic and a faster response time. 
- - -Smart joins will also work if the shard key of the second collection is not *_key*, -and even for non-unique shard key values, e.g.: - - arangosh> db._create("c1", {numberOfShards: 4, shardKeys: ["_key"]}); - arangosh> db._create("c2", {shardKeys: ["parent"], distributeShardsLike: "c1"}); - arangosh> db.c2.ensureIndex({ type: "hash", fields: ["parent"] }); - arangosh> for (i = 0; i < 100; ++i) { - db.c1.insert({ _key: "test" + i }); - for (j = 0; j < 10; ++j) { - db.c2.insert({ parent: "test" + i }); - } - } - - arangosh> db._explain("FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2.parent RETURN doc1"); - - Query String: - FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2.parent RETURN doc1 - - Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 3 EnumerateCollectionNode DBS 2000 - FOR doc2 IN c2 /* full collection scan, 4 shard(s) */ - 7 IndexNode DBS 2000 - FOR doc1 IN c1 /* primary index scan, 4 shard(s) */ - 10 RemoteNode COOR 2000 - REMOTE - 11 GatherNode COOR 2000 - GATHER - 6 ReturnNode COOR 2000 - RETURN doc1 - - -Smart joins using smartJoinAttribute ------------------------------------- - -In case the join on the second collection must be performed on a non-shard key -attribute, there is the option to specify a *smartJoinAttribute* for the collection. -Note that for this case, setting *distributeShardsLike* is still required here, and that that -only a single *shardKeys* attribute can be used. -The single attribute name specified in the *shardKeys* attribute for the collection must end -with a colon character then. - -This *smartJoinAttribute* must be populated for all documents in the collection, -and must always contain a string value. The value of the *_key* attribute for each -document must consist of the value of the *smartJoinAttribute*, a colon character -and then some other user-defined key component. - -The setup thus becomes: - - arangosh> db._create("c1", {numberOfShards: 4, shardKeys: ["_key"]}); - arangosh> db._create("c2", {shardKeys: ["_key:"], smartJoinAttribute: "parent", distributeShardsLike: "c1"}); - arangosh> db.c2.ensureIndex({ type: "hash", fields: ["parent"] }); - arangosh> for (i = 0; i < 100; ++i) { - db.c1.insert({ _key: "test" + i }); - db.c2.insert({ _key: "test" + i + ":" + "ownKey" + i, parent: "test" + i }); - } - -Failure to populate the *smartJoinAttribute* with a string or not at all will lead -to a document being rejected on insert, update or replace. Similarly, failure to -prefix a document's *_key* attribute value with the value of the *smartJoinAttribute* -will also lead to the document being rejected: - - arangosh> db.c2.insert({ parent: 123 }); - JavaScript exception in file './js/client/modules/@arangodb/arangosh.js' at 99,7: ArangoError 4008: smart join attribute not given or invalid - - arangosh> db.c2.insert({ _key: "123:test1", parent: "124" }); - JavaScript exception in file './js/client/modules/@arangodb/arangosh.js' at 99,7: ArangoError 4007: shard key value must be prefixed with the value of the smart join attribute - -The join can now be performed via the collection's *smartJoinAttribute*: - - arangosh> db._explain("FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2.parent RETURN doc1") - - Query String: - FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == doc2.parent RETURN doc1 - - Execution plan: - Id NodeType Site Est. 
Comment - 1 SingletonNode DBS 1 * ROOT - 3 EnumerateCollectionNode DBS 101 - FOR doc2 IN c2 /* full collection scan, 4 shard(s) */ - 7 IndexNode DBS 101 - FOR doc1 IN c1 /* primary index scan, 4 shard(s) */ - 10 RemoteNode COOR 101 - REMOTE - 11 GatherNode COOR 101 - GATHER - 6 ReturnNode COOR 101 - RETURN doc1 - - -Restricting smart joins to a single shard ------------------------------------------ - -If a FILTER condition is used on one of the shard keys, the optimizer will also try -to restrict the queries to just the required shards: - - arangosh> db._explain("FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == 'test' && doc1._key == doc2.value RETURN doc1"); - - Query String: - FOR doc1 IN c1 FOR doc2 IN c2 FILTER doc1._key == 'test' && doc1._key == doc2.value - RETURN doc1 - - Execution plan: - Id NodeType Site Est. Comment - 1 SingletonNode DBS 1 * ROOT - 8 IndexNode DBS 1 - FOR doc1 IN c1 /* primary index scan, shard: s2010246 */ - 7 IndexNode DBS 1 - FOR doc2 IN c2 /* primary index scan, scan only, shard: s2010253 */ - 12 RemoteNode COOR 1 - REMOTE - 13 GatherNode COOR 1 - GATHER - 6 ReturnNode COOR 1 - RETURN doc1 - - -Limitations ------------ - -The smart join optimization is currently triggered only for data selection queries, -but not for any data-manipulation operations such as INSERT, UPDATE, REPLACE, REMOVE -or UPSERT, neither traversals, subqueries or views. - -It will only be applied when joining two collections with an identical sharding setup. -This requires the second collection to be created with its *distributeShardsLike* -attribute pointing to the first collection. - -It is restricted to be used with simple shard key attributes (such as `_key`, `productId`), -but not with nested attributes (e.g. `name.first`). There should be exactly one shard -key attribute defined for each collection. - -Finally, the smart join optimization requires that the collections are joined on their -shard key attributes (or smartJoinAttribute) using an equality comparison. diff --git a/Documentation/Books/Manual/Transactions/Durability.md b/Documentation/Books/Manual/Transactions/Durability.md deleted file mode 100644 index 5d6ba2e8bc6f..000000000000 --- a/Documentation/Books/Manual/Transactions/Durability.md +++ /dev/null @@ -1,85 +0,0 @@ -Durability -========== - -Transactions are executed until there is either a rollback -or a commit. On rollback the operations from the transaction will be reversed. - -On commit, all modifications done in the transaction will be written to the -collection datafiles. These writes will be synchronized to disk if any of the -modified collections has the *waitForSync* property set to *true*, or if any -individual operation in the transaction was executed with the *waitForSync* -attribute. -Additionally, transactions that modify data in more than one collection are -automatically synchronized to disk. This synchronization is done to not only -ensure durability, but to also ensure consistency in case of a server crash. - -That means if you only modify data in a single collection, and that collection -has its *waitForSync* property set to *false*, the whole transaction will not -be synchronized to disk instantly, but with a small delay. - -There is thus the potential risk of losing data between the commit of the -transaction and the actual (delayed) disk synchronization. This is the same as -writing into collections that have the *waitForSync* property set to *false* -outside of a transaction. 
-In case of a crash with *waitForSync* set to false, the operations performed in -the transaction will either be visible completely or not at all, depending on -whether the delayed synchronization had kicked in or not. - -To ensure durability of transactions on a collection that have the *waitForSync* -property set to *false*, you can set the *waitForSync* attribute of the object -that is passed to *executeTransaction*. This will force a synchronization of the -transaction to disk even for collections that have *waitForSync* set to *false*: - - db._executeTransaction({ - collections: { - write: "users" - }, - waitForSync: true, - action: function () { ... } - }); - - -An alternative is to perform an operation with an explicit *sync* request in -a transaction, e.g. - - db.users.save({ _key: "1234" }, true); - -In this case, the *true* value will make the whole transaction be synchronized -to disk at the commit. - -In any case, ArangoDB will give users the choice of whether or not they want -full durability for single collection transactions. Using the delayed synchronization -(i.e. *waitForSync* with a value of *false*) will potentially increase throughput -and performance of transactions, but will introduce the risk of losing the last -committed transactions in the case of a crash. - -The call to the *_executeTransaction* function -will only return after the data of all modified collections has been synchronized -to disk and the transaction has been made fully durable. This not only reduces the -risk of losing data in case of a crash but also ensures consistency after a -restart. - -MMFiles Storage Engine ----------------------- - -The MMFiles storage engine continuously writes the transaction operation into -a journal file on the disk (Journal is sometimes also referred to as write-ahead-log). - -This means that the commit operation can be very fast because the engine only needs -to write the *commit* marker into the journal (and perform a disk-sync if -*waitForSync* was set to *true*). This also means that failed or aborted -transactions need to be rolled back by reversing every single operation. - -In case of a server crash, any multi-collection transactions that were not yet -committed or in preparation to be committed will be rolled back on server restart. - -RocksDB Storage Engine ----------------------- - -The RocksDB Storage Engine applies operations in a transaction only in main memory -until they are committed. In case of an a rollback the entire transaction is just -cleared, no extra rollback steps are required. - -In the event of a server-crash the storage engine will scan the write-ahead log -to restore certain meta-data like the number of documents in collection -or the selectivity estimates of secondary indexes. \ No newline at end of file diff --git a/Documentation/Books/Manual/Transactions/Limitations.md b/Documentation/Books/Manual/Transactions/Limitations.md deleted file mode 100644 index 55fa9e5b54e1..000000000000 --- a/Documentation/Books/Manual/Transactions/Limitations.md +++ /dev/null @@ -1,164 +0,0 @@ -Limitations -=========== - -In General ----------- - -Transactions in ArangoDB have been designed with particular use cases -in mind. They will be mainly useful for *short and small* data retrieval -and/or modification operations. - -The implementation is **not** optimized for *very long-running* or *very voluminous* -operations, and may not be usable for these cases. - -One limitation is that a transaction operation information must fit into main -memory. 
The transaction information consists of record pointers, revision numbers -and rollback information. The actual data modification operations of a transaction -are written to the write-ahead log and do not need to fit entirely into main -memory. - -Ongoing transactions will also prevent the write-ahead logs from being fully -garbage-collected. Information in the write-ahead log files cannot be written -to collection data files or be discarded while transactions are ongoing. - -To ensure progress of the write-ahead log garbage collection, transactions should -be kept as small as possible, and big transactions should be split into multiple -smaller transactions. - -Transactions in ArangoDB cannot be nested, i.e. a transaction must not start another -transaction. If an attempt is made to call a transaction from inside a running -transaction, the server will throw error *1651 (nested transactions detected)*. - -It is also disallowed to execute user transaction on some of ArangoDB's own system -collections. This shouldn't be a problem for regular usage as system collections will -not contain user data and there is no need to access them from within a user -transaction. - -Some operations are not allowed inside transactions in general: - -- creation and deletion of databases (`db._createDatabase()`, `db._dropDatabase()`) -- creation and deletion of collections (`db._create()`, `db._drop()`, `db..rename()`) -- creation and deletion of indexes (`db..ensureIndex()`, `db..dropIndex()`) - -If an attempt is made to carry out any of these operations during a transaction, -ArangoDB will abort the transaction with error code *1653 (disallowed operation inside -transaction)*. - -Finally, all collections that may be modified during a transaction must be -declared beforehand, i.e. using the *collections* attribute of the object passed -to the *_executeTransaction* function. If any attempt is made to carry out a data -modification operation on a collection that was not declared in the *collections* -attribute, the transaction will be aborted and ArangoDB will throw error *1652 -unregistered collection used in transaction*. -It is legal to not declare read-only collections, but this should be avoided if -possible to reduce the probability of deadlocks and non-repeatable reads. - -In Clusters ------------ - -Using a single instance of ArangoDB, multi-document / multi-collection queries -are guaranteed to be fully ACID in the [traditional sense](https://en.wikipedia.org/wiki/ACID_(computer_science)). -This is more than many other NoSQL database systems support. -In cluster mode, single-document operations are also *fully ACID*. - -Multi-document / multi-collection queries and transactions offer different guarantees. -Understanding these differences is important when designing applications that need -to be resilient agains outages of individual servers. - -Cluster transactions share the underlying characteristics of the [storage engine](../Architecture/StorageEngines.md) -that is used for the cluster deployment. -A transaction started on a Coordinator translates to one transaction per involved DBServer. -The guarantees and characteristics of the given storage-engine apply additionally -to the cluster specific information below. -Please refer to [Locking and Isolation](LockingAndIsolation.md) for more details -on the storage-engines. - -### Atomicity - -A transaction on *one DBServer* is either committed completely or not at all. - -ArangoDB transactions do currently not require any form of global consensus. 
This makes -them relatively fast, but also vulnerable to unexpected server outages. - -Should a transaction involve [Leader Shards](../Architecture/DeploymentModes/Cluster/Architecture.md#dbservers) -on *multiple DBServers*, the atomicity of the distributed transaction *during the commit operation* can -not be guaranteed. Should one of the involve DBServers fails during the commit the transaction -is not rolled-back globally, sub-transactions may have been committed on some DBServers, but not on others. -Should this case occur the client application will see an error. - -An improved failure handling issue might be introduced in future versions. - -### Consistency - -We provide consistency even in the cluster, a transaction will never leave the data in -an incorrect or corrupt state. - -In ArangoDB there is always exactly one DBServer responsible for a given shard. In both -Storage-Engines the locking procedures ensure that dependent transactions (in the sense that -the transactions modify the same documents or unique index entries) are ordered sequentially. -Therefore we can provide [Causal-Consistency](https://en.wikipedia.org/wiki/Consistency_model#Causal_consistency) -for your transactions. - -From the applications point-of-view this also means that a given transaction can always -[read it's own writes](https://en.wikipedia.org/wiki/Consistency_model#Read-your-writes_consistency). -Other concurrent operations will not change the database state seen by a transaction. - -### Isolation - -The ArangoDB Cluster provides *Local Snapshot Isolation*. This means that all operations -and queries in the transactions will see the same version, or snapshot, of the data on a given -DBServer. This snapshot is based on the state of the data at the moment in -time when the transaction begins *on that DBServer*. - -### Durability - -It is guaranteed that successfully committed transactions are persistent. Using -replication and / or *waitForSync* increases the durability (Just as with the single-server). - -RocksDB storage engine ---------------------------- - -{% hint 'info' %} -The following restrictions and limitations do not apply to JavaScript -transactions, since their intended use case is for smaller transactions -with full transactional guarantees. So the following only applies -to AQL queries and transactions created through the document API (i.e. batch operations). -{% endhint %} - -Data of ongoing transactions is stored in RAM. Transactions that get too big -(in terms of number of operations involved or the total size of data created or -modified by the transaction) will be committed automatically. Effectively this -means that big user transactions are split into multiple smaller RocksDB -transactions that are committed individually. The entire user transaction will -not necessarily have ACID properties in this case. - -The following global options can be used to control the RAM usage and automatic -intermediate commits for the RocksDB engine: - -`--rocksdb.max-transaction-size` - -Transaction size limit (in bytes). Transactions store all keys and values in -RAM, so large transactions run the risk of causing out-of-memory sitations. -This setting allows you to ensure that does not happen by limiting the size of -any individual transaction. Transactions whose operations would consume more -RAM than this threshold value will abort automatically with error 32 ("resource -limit exceeded"). 
- -`--rocksdb.intermediate-commit-size` - -If the size of all operations in a transaction reaches this threshold, the transaction -is committed automatically and a new transaction is started. The value is specified in bytes. - -`--rocksdb.intermediate-commit-count` - -If the number of operations in a transaction reaches this value, the transaction is -committed automatically and a new transaction is started. - -The above values can also be adjusted per query, by setting the following -attributes in the call to *db._query()*: - -- *maxTransactionSize*: transaction size limit in bytes -- *intermediateCommitSize*: maximum total size of operations after which an intermediate - commit is performed automatically -- *intermediateCommitCount*: maximum number of operations after which an intermediate - commit is performed automatically diff --git a/Documentation/Books/Manual/Transactions/LockingAndIsolation.md b/Documentation/Books/Manual/Transactions/LockingAndIsolation.md deleted file mode 100644 index 6771c187da65..000000000000 --- a/Documentation/Books/Manual/Transactions/LockingAndIsolation.md +++ /dev/null @@ -1,260 +0,0 @@ -Locking and Isolation -===================== - -Transactions need to specify from which collections they will read data and which -collections they intend to modify. This can be done by setting the *read*, *write*, -or *exclusive* attributes in the *collections* attribute of the transaction: - -```js -db._executeTransaction({ - collections: { - read: "users", - write: ["test", "log"] - }, - action: function () { - const db = require("@arangodb").db; - db.users.toArray().forEach(function(doc) { - db.log.insert({ value: "removed user: " + doc.name }); - db.test.remove(doc._key); - }); - } -}); -``` - -*write* here means write access to the collection, and also includes any read accesses. -*exclusive* is a synonym for *write* in the MMFiles engine, because both *exclusive* and -*write* will acquire collection-level locks in this engine. In the RocksDB engine, -*exclusive* means exclusive write access to the collection, and *write* means (shared) -write access to the collection, which can be interleaved with write accesses by other -concurrent transactions. - -MMFiles engine --------------- - -The *MMFiles engine* uses the following locking mechanisms to serialize transactions -on the same data: - -All collections specified in the *collections* attribute are locked in the -requested mode (read or write) at transaction start. Locking of multiple collections -is performed in alphabetical order. -When a transaction commits or rolls back, all locks are released in reverse order. -The locking order is deterministic to avoid deadlocks. - -While locks are held, modifications by other transactions to the collections -participating in the transaction are prevented. -A transaction will thus see a consistent view of the participating collections' data. - -Additionally, a transaction will not be interrupted or interleaved with any other -ongoing operations on the same collection. This means each transaction will run in -isolation. A transaction should never see uncommitted or rolled back modifications by -other transactions. Additionally, reads inside a transaction are repeatable. - -Note that the above is true only for all collections that are declared in the -*collections* attribute of the transaction. - -RocksDB engine --------------- - -The *RocksDB* engine does not lock any collections participating in a transaction -for read. 
Read operations can run in parallel to other read or write operations on the -same collections. - - -### Locking - -For all collections that are used in write mode, the RocksDB engine will internally -acquire a (shared) read lock. This means that many writers can modify data in the same -collection in parallel (and also run in parallel to ongoing reads). However, if two -concurrent transactions attempt to modify the same document or index entry, there will -be a write-write conflict, and one of the transactions will abort with error 1200 -("conflict"). It is then up to client applications to retry the failed transaction or -accept the failure. - -In order to guard long-running or complex transactions against concurrent operations -on the same data, the RocksDB engine allows to access collections in exclusive mode. -Exclusive accesses will internally acquire a write-lock on the collections, so they -are not executed in parallel with any other write operations. Read operations can still -be carried out by other concurrent transactions. - -### Isolation - -The RocksDB storage-engine provides *snapshot isolation*. This means that all operations -and queries in the transactions will see the same version, or snapshot, of the database. -This snapshot is based on the state of the database at the moment in time when the transaction -begins. No locks are acquired on the underlying data to keep this snapshot, which permits -other transactions to execute without being blocked by an older uncompleted transaction -(so long as they do not try to modify the same documents or unique index-entries concurrently). -In the cluster a snapshot is acquired on each DBServer individually. - -Lazily adding collections -------------------------- - -There might be situations when declaring all collections a priori is not possible, -for example, because further collections are determined by a dynamic AQL query -inside the transaction, for example a query using AQL graph traversal. - -In this case, it would be impossible to know beforehand which collection to lock, and -thus it is legal to not declare collections that will be accessed in the transaction in -read-only mode. Accessing a non-declared collection in read-only mode during a -transaction will add the collection to the transaction lazily, and fetch data -from the collection as usual. However, as the collection is added lazily, there is no -isolation from other concurrent operations or transactions. Reads from such -collections are potentially non-repeatable. - -**Examples:** - -```js -db._executeTransaction({ - collections: { - read: "users" - }, - action: function () { - const db = require("@arangodb").db; - /* Execute an AQL query that traverses a graph starting at a "users" vertex. - It is yet unknown into which other collections the query might traverse */ - db._createStatement({ - query: `FOR v IN ANY "users/1234" connections RETURN v` - }).execute().toArray().forEach(function (d) { - /* ... */ - }); - } -}); -``` - -This automatic lazy addition of collections to a transaction also introduces the -possibility of deadlocks. Deadlocks may occur if there are concurrent transactions -that try to acquire locks on the same collections lazily. 
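One way for client code to deal with this risk is to catch the deadlock error and retry the whole transaction. The following is only a sketch: the collection names and retry count are made up, and error 29 is the *deadlock detected* error discussed further below:

```js
var db = require("@arangodb").db;

function executeWithRetry(trx, maxAttempts) {
  for (var attempt = 1; attempt <= maxAttempts; ++attempt) {
    try {
      return db._executeTransaction(trx);
    } catch (err) {
      // 29 = deadlock detected; rethrow anything else and give up after the last attempt
      if (err.errorNum !== 29 || attempt === maxAttempts) {
        throw err;
      }
    }
  }
}

executeWithRetry({
  collections: {
    write: "log"   // "connections" and "users" may get added lazily by the query below
  },
  action: function () {
    var db = require("@arangodb").db;
    var found = db._query('FOR v IN ANY "users/1234" connections RETURN v').toArray();
    db.log.insert({ reachable: found.length });
  }
}, 3);
```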
- -In order to make a transaction fail when a non-declared collection is used inside -a transaction for reading, the optional *allowImplicit* sub-attribute of *collections* -can be set to *false*: - -```js -db._executeTransaction({ - collections: { - read: "users", - allowImplicit: false - }, - action: function () { - /* The below query will now fail because the collection "connections" has not - been specified in the list of collections used by the transaction */ - const db = require("@arangodb").db; - db._createStatement({ - query: `FOR v IN ANY "users/1234" connections RETURN v` - }).execute().toArray().forEach(function (d) { - /* ... */ - }); - } -}); -``` - -The default value for *allowImplicit* is *true*. Write-accessing collections that -have not been declared in the *collections* array is never possible, regardless of -the value of *allowImplicit*. - -If *users/1234* has an edge in *connections*, linking it to another document in -the *users* collection, then the following explicit declaration will work: - -```js -db._executeTransaction({ - collections: { - read: ["users", "connections"], - allowImplicit: false - }, - /* ... */ -``` - -If the edge points to a document in another collection however, then the query -will fail, unless that other collection is added to the declaration as well. - -Note that if a document handle is used as starting point for a traversal, e.g. -`FOR v IN ANY "users/not_linked" ...` or `FOR v IN ANY {_id: "users/not_linked"} ...`, -then no error is raised in the case of the start vertex not having any edges to -follow, with `allowImplicit: false` and *users* not being declared for read access. -AQL only sees a string and does not consider it a read access, unless there are -edges connected to it. `FOR v IN ANY DOCUMENT("users/not_linked") ...` will fail -even without edges, as it is always considered to be a read access to the *users* -collection. - -Deadlocks and Deadlock detection --------------------------------- - -A deadlock is a situation in which two or more concurrent operations (user transactions -or AQL queries) try to access the same resources (collections, documents) and need to -wait for the others to finish, but none of them can make any progress. - -A good example for a deadlock is two concurrently executing transactions T1 and T2 that -try to access the same collections but that need to wait for each other. In this example, -transaction T1 will write to collection `c1`, but will also read documents from -collection `c2` without announcing it: - -```js -db._executeTransaction({ - collections: { - write: "c1" - }, - action: function () { - const db = require("@arangodb").db; - - /* write into c1 (announced) */ - db.c1.insert({ foo: "bar" }); - - /* some operation here that takes long to execute... */ - - /* read from c2 (unannounced) */ - db.c2.toArray(); - } -}); -``` - -Transaction T2 announces to write into collection `c2`, but will also read -documents from collection `c1` without announcing it: - -```js -db._executeTransaction({ - collections: { - write: "c2" - }, - action: function () { - var db = require("@arangodb").db; - - /* write into c2 (announced) */ - db.c2.insert({ bar: "baz" }); - - /* some operation here that takes long to execute... 
*/ - - /* read from c1 (unannounced) */ - db.c1.toArray(); - } -}); -``` - -In the above example, a deadlock will occur if transaction T1 and T2 have both -acquired their write locks (T1 for collection `c1` and T2 for collection `c2`) and -are then trying to read from the other other (T1 will read from `c2`, T2 will read -from `c1`). T1 will then try to acquire the read lock on collection `c2`, which -is prevented by transaction T2. T2 however will wait for the read lock on -collection `c1`, which is prevented by transaction T1. - -In case of such deadlock, there would be no progress for any of the involved -transactions, and none of the involved transactions could ever complete. This is -completely undesirable, so the automatic deadlock detection mechanism in ArangoDB -will automatically abort one of the transactions involved in such deadlock. Aborting -means that all changes done by the transaction will be rolled back and error 29 -(`deadlock detected`) will be thrown. - -Client code (AQL queries, user transactions) that accesses more than one collection -should be aware of the potential of deadlocks and should handle the error 29 -(`deadlock detected`) properly, either by passing the exception to the caller or -retrying the operation. - -To avoid both deadlocks and non-repeatable reads, all collections used in a -transaction should be specified in the `collections` attribute when known in advance. -In case this is not possible because collections are added dynamically inside the -transaction, deadlocks may occur and the deadlock detection may kick in and abort -the transaction. - -The *RocksDB* engine uses document-level locks and therefore will not have a deadlock -problem on collection level. If two concurrent transactions however modify the same -documents or index entries, the RocksDB engine will signal a write-write conflict -and abort one of the transactions with error 1200 ("conflict") automatically. diff --git a/Documentation/Books/Manual/Transactions/Passing.md b/Documentation/Books/Manual/Transactions/Passing.md deleted file mode 100644 index 550ab61edbc0..000000000000 --- a/Documentation/Books/Manual/Transactions/Passing.md +++ /dev/null @@ -1,38 +0,0 @@ -Passing parameters to transactions -================================== - -Arbitrary parameters can be passed to transactions by setting the *params* -attribute when declaring the transaction. This feature is handy to re-use the -same transaction code for multiple calls but with different parameters. - -A basic example: - - db._executeTransaction({ - collections: { }, - action: function (params) { - return params[1]; - }, - params: [ 1, 2, 3 ] - }); - -The above example will return *2*. - -Some example that uses collections: - - db._executeTransaction({ - collections: { - write: "users", - read: [ "c1", "c2" ] - }, - action: function (params) { - var db = require('@arangodb').db; - var doc = db.c1.document(params['c1Key']); - db.users.save(doc); - doc = db.c2.document(params['c2Key']); - db.users.save(doc); - }, - params: { - c1Key: "foo", - c2Key: "bar" - } - }); diff --git a/Documentation/Books/Manual/Transactions/README.md b/Documentation/Books/Manual/Transactions/README.md deleted file mode 100644 index e39a47ea26b2..000000000000 --- a/Documentation/Books/Manual/Transactions/README.md +++ /dev/null @@ -1,24 +0,0 @@ -Transactions -============ - -ArangoDB provides support for user-definable transactions. - -Transactions in ArangoDB are atomic, consistent, isolated, and durable (*ACID*). 
- -These *ACID* properties provide the following guarantees: - -* The *atomicity* principle makes transactions either complete in their - entirety or have no effect at all. -* The *consistency* principle ensures that no constraints or other invariants - will be violated during or after any transaction. A transaction will never - corrupt the database. -* The *isolation* property will hide the modifications of a transaction from - other transactions until the transaction commits. -* Finally, the *durability* proposition makes sure that operations from - transactions that have committed will be made persistent. The amount of - transaction durability is configurable in ArangoDB, as is the durability - on collection level. - -Should you run the ArangoDB Cluster, please see the [Limitations section](Limitations.md#in-clusters) -to see more details regarding transactional behavior of multi-document transactions -in a distributed systems. \ No newline at end of file diff --git a/Documentation/Books/Manual/Transactions/TransactionInvocation.md b/Documentation/Books/Manual/Transactions/TransactionInvocation.md deleted file mode 100644 index 99694653761b..000000000000 --- a/Documentation/Books/Manual/Transactions/TransactionInvocation.md +++ /dev/null @@ -1,414 +0,0 @@ -Transaction invocation -====================== - -ArangoDB transactions are different from transactions in SQL. - -In SQL, transactions are started with explicit *BEGIN* or *START TRANSACTION* -command. Following any series of data retrieval or modification operations, an -SQL transaction is finished with a *COMMIT* command, or rolled back with a -*ROLLBACK* command. There may be client/server communication between the start -and the commit/rollback of an SQL transaction. - -In ArangoDB, a transaction is always a server-side operation, and is executed -on the server in one go, without any client interaction. All operations to be -executed inside a transaction need to be known by the server when the transaction -is started. - -There are no individual *BEGIN*, *COMMIT* or *ROLLBACK* transaction commands -in ArangoDB. Instead, a transaction in ArangoDB is started by providing a -description of the transaction to the *db._executeTransaction* JavaScript -function: - -```js -db._executeTransaction(description); -``` - -This function will then automatically start a transaction, execute all required -data retrieval and/or modification operations, and at the end automatically -commit the transaction. If an error occurs during transaction execution, the -transaction is automatically aborted, and all changes are rolled back. - -Execute transaction -------------------- - - - - -executes a transaction -`db._executeTransaction(object)` - -Executes a server-side transaction, as specified by *object*. - -*object* must have the following attributes: -- *collections*: a sub-object that defines which collections will be - used in the transaction. *collections* can have these attributes: - - *read*: a single collection or a list of collections that will be - used in the transaction in read-only mode - - *write*: a single collection or a list of collections that will be - used in the transaction in write or read mode. -- *action*: a Javascript function or a string with Javascript code - containing all the instructions to be executed inside the transaction. - If the code runs through successfully, the transaction will be committed - at the end. 
If the code throws an exception, the transaction will be - rolled back and all database operations will be rolled back. - -Additionally, *object* can have the following optional attributes: -- *waitForSync*: boolean flag indicating whether the transaction - is forced to be synchronous. -- *lockTimeout*: a numeric value that can be used to set a timeout for - waiting on collection locks. If not specified, a default value will be - used. Setting *lockTimeout* to *0* will make ArangoDB not time - out waiting for a lock. -- *params*: optional arguments passed to the function specified in - *action*. - -The following attributes can be used for transactions in the RocksDB storage engine: - -- *maxTransactionSize*: transaction size limit in bytes - -Declaration of collections --------------------------- - -All collections which are to participate in a transaction need to be declared -beforehand. This is a necessity to ensure proper locking and isolation. - -Collections can be used in a transaction in write mode or in read-only mode. - -If any data modification operations are to be executed, the collection must be -declared for use in write mode. The write mode allows modifying and reading data -from the collection during the transaction (i.e. the write mode includes the -read mode). - -Contrary, using a collection in read-only mode will only allow performing -read operations on a collection. Any attempt to write into a collection used -in read-only mode will make the transaction fail. - -Collections for a transaction are declared by providing them in the *collections* -attribute of the object passed to the *_executeTransaction* function. The -*collections* attribute has the sub-attributes *read* and *write*: - -```js -db._executeTransaction({ - collections: { - write: [ "users", "logins" ], - read: [ "recommendations" ] - } -}); -``` - -*read* and *write* are optional attributes, and only need to be specified if -the operations inside the transactions demand for it. - -The contents of *read* or *write* can each be lists arrays collection names or a -single collection name (as a string): - -```js -db._executeTransaction({ - collections: { - write: "users", - read: "recommendations" - } -}); -``` - -**Note**: It is currently optional to specify collections for read-only access. -Even without specifying them, it is still possible to read from such collections -from within a transaction, but with relaxed isolation. Please refer to -[Transactions Locking](../Transactions/LockingAndIsolation.md) for more details. - -In order to make a transaction fail when a non-declared collection is used inside -for reading, the optional *allowImplicit* sub-attribute of *collections* can be -set to *false*: - -```js -db._executeTransaction({ - collections: { - read: "recommendations", - allowImplicit: false /* this disallows read access to other collections - than specified */ - }, - action: function () { - var db = require("@arangodb").db; - return db.foobar.toArray(); /* will fail because db.foobar must not be accessed - for reading inside this transaction */ - } -}); -``` - -The default value for *allowImplicit* is *true*. Write-accessing collections that -have not been declared in the *collections* array is never possible, regardless of -the value of *allowImplicit*. 
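With the RocksDB storage engine, collections can additionally be declared in *exclusive* mode if a transaction must not be interleaved with other writes on them (see [Transactions Locking](../Transactions/LockingAndIsolation.md) for the exact semantics). A minimal sketch with example collection names:

```js
db._executeTransaction({
  collections: {
    exclusive: "counters",   // no other writes to "counters" while this transaction runs
    read: "settings"
  },
  action: function () {
    var db = require("@arangodb").db;
    var step = db.settings.document("hitCounter").step;
    var current = db.counters.document("hits").value;
    db.counters.update("hits", { value: current + step });
  }
});
```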
- -Declaration of data modification and retrieval operations ---------------------------------------------------------- - -All data modification and retrieval operations that are to be executed inside -the transaction need to be specified in a Javascript function, using the *action* -attribute: - -```js -db._executeTransaction({ - collections: { - write: "users" - }, - action: function () { - // all operations go here - } -}); -``` - -Any valid Javascript code is allowed inside *action* but the code may only -access the collections declared in *collections*. -*action* may be a Javascript function as shown above, or a string representation -of a Javascript function: - -``` -db._executeTransaction({ - collections: { - write: "users" - }, - action: "function () { doSomething(); }" -}); -``` -Please note that any operations specified in *action* will be executed on the -server, in a separate scope. Variables will be bound late. Accessing any JavaScript -variables defined on the client-side or in some other server context from inside -a transaction may not work. -Instead, any variables used inside *action* should be defined inside *action* itself: - -``` -db._executeTransaction({ - collections: { - write: "users" - }, - action: function () { - var db = require(...).db; - db.users.save({ ... }); - } -}); -``` - -When the code inside the *action* attribute is executed, the transaction is -already started and all required locks have been acquired. When the code inside -the *action* attribute finishes, the transaction will automatically commit. -There is no explicit commit command. - -To make a transaction abort and roll back all changes, an exception needs to -be thrown and not caught inside the transaction: - -```js -db._executeTransaction({ - collections: { - write: "users" - }, - action: function () { - var db = require("@arangodb").db; - db.users.save({ _key: "hello" }); - // will abort and roll back the transaction - throw "doh!"; - } -}); -``` - -There is no explicit abort or roll back command. - -As mentioned earlier, a transaction will commit automatically when the end of -the *action* function is reached and no exception has been thrown. In this -case, the user can return any legal JavaScript value from the function: - -```js -db._executeTransaction({ - collections: { - write: "users" - }, - action: function () { - var db = require("@arangodb").db; - db.users.save({ _key: "hello" }); - // will commit the transaction and return the value "hello" - return "hello"; - } -}); -``` - -Throwing Arango Exceptions --------------------------- - -If you catch errors in your transaction, try to get them solved, but fail -you may want to mimic original arangodb error messages to ease the control flow -of your invoking environment. This can be done like this: - -```js -db._executeTransaction({ - collections: {}, - action: function () { - const arangodb = require('@arangodb'); - var err = new arangodb.ArangoError(); - err.errorNum = arangodb.ERROR_BAD_PARAMETER; - err.errorMessage = "who's bad?"; - throw err; - } -}); -``` - -The documentation contains [a complete list of used arangodb errors in the appendix](../Appendix/ErrorCodes.md) - -Custom exceptions ------------------ - -One may wish to define custom exceptions inside of a transaction. To have the -exception propagate upwards properly, please throw an an instance of base -JavaScript `Error` class or a derivative. To specify an error number, include it -as the `errorNumber` field. 
As an example: - -```js -db._executeTransaction({ - collections: {}, - action: function () { - var err = new Error('My error context'); - err.errorNum = 1234; - throw err; - } -}); -``` - -**Note**: In previous versions, custom exceptions which did not have an -`Error`-like form were simply converted to strings and exposed in the -`exception` field of the returned error. This is no longer the case, as it had -the potential to leak unwanted information if improperly used. - -**Note**: In some versions the above example wouldn't propagate the `errorNum` to the -invoking party, you may need to upgrade your ArangoDB. - -### Examples - -The first example will write 3 documents into a collection named *c1*. -The *c1* collection needs to be declared in the *write* attribute of the -*collections* attribute passed to the *executeTransaction* function. - -The *action* attribute contains the actual transaction code to be executed. -This code contains all data modification operations (3 in this example). - -```js -// setup -db._create("c1"); - -db._executeTransaction({ - collections: { - write: [ "c1" ] - }, - action: function () { - var db = require("@arangodb").db; - db.c1.save({ _key: "key1" }); - db.c1.save({ _key: "key2" }); - db.c1.save({ _key: "key3" }); - } -}); - db.c1.count(); // 3 -``` - -Aborting the transaction by throwing an exception in the *action* function -will revert all changes, so as if the transaction never happened: - -```js -// setup -db._create("c1"); - -db._executeTransaction({ - collections: { - write: [ "c1" ] - }, - action: function () { - var db = require("@arangodb").db; - db.c1.save({ _key: "key1" }); - db.c1.count(); // 1 - db.c1.save({ _key: "key2" }); - db.c1.count(); // 2 - throw "doh!"; - } -}); - -db.c1.count(); // 0 -``` - -The automatic rollback is also executed when an internal exception is thrown -at some point during transaction execution: - -```js -// setup -db._create("c1"); - -db._executeTransaction({ - collections: { - write: [ "c1" ] - }, - action: function () { - var db = require("@arangodb").db; - db.c1.save({ _key: "key1" }); - // will throw duplicate a key error, not explicitly requested by the user - db.c1.save({ _key: "key1" }); - // we'll never get here... - } -}); - -db.c1.count(); // 0 -``` - -As required by the *consistency* principle, aborting or rolling back a -transaction will also restore secondary indexes to the state at transaction -start. - -Cross-collection transactions ------------------------------ - -There's also the possibility to run a transaction across multiple collections. -In this case, multiple collections need to be declared in the *collections* -attribute, e.g.: - -```js -// setup -db._create("c1"); -db._create("c2"); - -db._executeTransaction({ - collections: { - write: [ "c1", "c2" ] - }, - action: function () { - var db = require("@arangodb").db; - db.c1.save({ _key: "key1" }); - db.c2.save({ _key: "key2" }); - } -}); - -db.c1.count(); // 1 -db.c2.count(); // 1 -``` - -Again, throwing an exception from inside the *action* function will make the -transaction abort and roll back all changes in all collections: - -```js -// setup -db._create("c1"); -db._create("c2"); - -db._executeTransaction({ - collections: { - write: [ "c1", "c2" ] - }, - action: function () { - var db = require("@arangodb").db; - for (var i = 0; i < 100; ++i) { - db.c1.save({ _key: "key" + i }); - db.c2.save({ _key: "key" + i }); - } - db.c1.count(); // 100 - db.c2.count(); // 100 - // abort - throw "doh!" 
- } -}); - -db.c1.count(); // 0 -db.c2.count(); // 0 -``` diff --git a/Documentation/Books/Manual/Troubleshooting/Arangod.md b/Documentation/Books/Manual/Troubleshooting/Arangod.md deleted file mode 100644 index a3eb81711ad6..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/Arangod.md +++ /dev/null @@ -1,62 +0,0 @@ -Arangod -======= - -If the ArangoDB server does not start or if you cannot connect to it -using *arangosh* or other clients, you can try to find the problem cause by -executing the following steps. If the server starts up without problems -you can skip this section. - -- *Check the server log file*: If the server has written a log file you should - check it because it might contain relevant error context information. - -- *Check the configuration*: The server looks for a configuration file - named *arangod.conf* on startup. The contents of this file will be used - as a base configuration that can optionally be overridden with command-line - configuration parameters. You should check the config file for the most - relevant parameters such as: - - *server.endpoint*: What IP address and port to bind to - - *log parameters*: If and where to log - - *database.directory*: Path the database files are stored in - - If the configuration reveals that something is not configured right the config - file should be adjusted and the server be restarted. - -- *Start the server manually and check its output*: Starting the server might - fail even before logging is activated so the server will not produce log - output. This can happen if the server is configured to write the logs to - a file that the server has no permissions on. In this case the server - cannot log an error to the specified log file but will write a startup - error on stderr instead. - Starting the server manually will also allow you to override specific - configuration options, e.g. to turn on/off file or screen logging etc. - -- *Check the TCP port*: If the server starts up but does not accept any incoming - connections this might be due to firewall configuration between the server - and any client(s). The server by default will listen on TCP port 8529. Please - make sure this port is actually accessible by other clients if you plan to use - ArangoDB in a network setup. - - When using hostnames in the configuration or when connecting, please make - sure the hostname is actually resolvable. Resolving hostnames might invoke - DNS, which can be a source of errors on its own. - - It is generally good advice to not use DNS when specifying the endpoints - and connection addresses. Using IP addresses instead will rule out DNS as - a source of errors. Another alternative is to use a hostname specified - in the local */etc/hosts* file, which will then bypass DNS. - -- *Test if *curl* can connect*: Once the server is started, you can quickly - verify if it responds to requests at all. This check allows you to - determine whether connection errors are client-specific or not. If at - least one client can connect, it is likely that connection problems of - other clients are not due to ArangoDB's configuration but due to client - or in-between network configurations. - - You can test connectivity using a simple command such as: - - **curl --dump - -X GET http://127.0.0.1:8529/_api/version && echo** - - This should return a response with an *HTTP 200* status code when the - server is running. If it does it also means the server is generally - accepting connections. Alternative tools to check connectivity are *lynx* - or *ab*. 
diff --git a/Documentation/Books/Manual/Troubleshooting/Cluster/AgencyDump.md b/Documentation/Books/Manual/Troubleshooting/Cluster/AgencyDump.md deleted file mode 100644 index 163b9a8819af..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/Cluster/AgencyDump.md +++ /dev/null @@ -1,53 +0,0 @@ -How to produce an Agency Dump -============================= - -One can read out all information of an _Agency_ in the following way: - -``` -curl -L http://:/_api/agency/read -d '[["/"]]' -``` - -Please make sure to use the _IP_ (or hostname) and _PORT_ of one _Agent_. - -The `-L` means that the _curl_ request follows redirections in case one talks to a _follower_ instead of the _leader_ of the _Agency_. - -In case of an authenticated _Cluster_, to access _Agents_ a JWT token is needed. - -When authentication is enabled, the user provides a "JWT-secret" to every server via the options (command line or config file). -Let's suppose the JWT-secret is _geheim_. To create the _Agency_ dump, the _secret_ first has to be turned into a token. This can be done in the following way: - -``` -jwtgen -a HS256 -s geheim -c server_id=setup -c iss=arangodb -``` - -which outputs the following text: - -``` -eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1Mjc2ODYzMDAsInNlcnZlcl9pZCI6InNldHVwIiwiaXNzIjoiYXJhbmdvZGIifQ.dBUhmxY3Q7rLHHDQc9FL4ghOfGiNJRFws_U2ZX4H-58 -``` - -Note that the `iss` values is essentially arbitrary: the crucial things in the _jwtgen_ command above are the `HS256`, the JWT secret `geheim` and `server_id=setup`. - -`jwtgen` is a node.js program that can be installed on a system with `node.js` as follows: - -``` -npm install -g jwtgen -``` - -The generated token is then used in the following way with `curl`, to produce the _Agency_ dump: - -``` -curl -L http://:/_api/agency/read -d '[["/"]]' -H "Authorization: bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1Mjc2ODYzMDAsInNlcnZlcl9pZCI6InNldHVwIiwiaXNzIjoiYXJhbmdvZGIifQ.dBUhmxY3Q7rLHHDQc9FL4ghOfGiNJRFws_U2ZX4H-58" -``` - -Please make sure to use the _IP_ (or hostname) and _PORT_ of one _Agent_. - -The two commands above can be easily executed in a single command in the following way: - -``` -curl -L http://:/_api/agency/read [["/"]]' -H "Authorization: bearer $(jwtgen -a H256 -s geheim -c 'iss=arangodb' -c 'server_id=setup')" -``` - -As always, use the _IP_ (or hostname) and _PORT_ of one _Agent_. - -Should the _Agency_ be down, an _Agency_ dump can still be created starting from the database directory of (one of) the _Agents_. Contact ArangoDB Support, in this case, to obtain more detailed guidelines on how to produce the dump. diff --git a/Documentation/Books/Manual/Troubleshooting/Cluster/README.md b/Documentation/Books/Manual/Troubleshooting/Cluster/README.md deleted file mode 100644 index 5e6cde44e971..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/Cluster/README.md +++ /dev/null @@ -1,28 +0,0 @@ -Cluster Troubleshooting -======================= - -* Cluster frontend is unresponsive - * Check if the _Coordinator_/s in question are started up. - * Check if the _Agency_ is up and a _leader_ has been elected. If not - ensure that all or a majority of _Agents_ are up and running. - * Check if all processes have been started up with the same - `JWT_SECRET`. If not ensure that the `JWT_SECRET` used across - the cluster nodes is identical for every process. - * Check if all cluster nodes have been started with SSL either - dis- or enabled. 
If not decide what mode of operation you would - like to run your cluster in, and consistently stick with for all - _Agents_, _Coordinators_ and _DBServers_. - * Check if network communication between the cluster nodes is such - that all processes can directly access their peers. Do not - operate proxies between the cluster nodes. - -* Cluster front end announces errors on any number of nodes - * This is an indication that the _Agency_ is running but either - _Coordinators_ or _DBServers_ are disconnected or shut - down. Establish network connection to or start the according - nodes. - * Make sure that the nodes in question share the same `JWT_SECRET` - and SSL operation mode with the functioning nodes. - -Dig deeper into cluster troubleshooting by going through the -[ArangoDB Cluster Administration Course](https://www.arangodb.com/arangodb-cluster-course/). diff --git a/Documentation/Books/Manual/Troubleshooting/DC2DC/README.md b/Documentation/Books/Manual/Troubleshooting/DC2DC/README.md deleted file mode 100644 index 5cb85bebda34..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/DC2DC/README.md +++ /dev/null @@ -1,159 +0,0 @@ - -# Troubleshooting datacenter to datacenter replication - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -The _datacenter to datacenter replication_ is a distributed system with a lot -different components. As with any such system, it requires some, but not a lot, -of operational support. - -This section includes information on how to troubleshoot the -_datacenter to datacenter replication_. - -For a general introduction to the _datacenter to datacenter replication_, please -refer to the [Datacenter to datacenter replication](../../Architecture/DeploymentModes/DC2DC/README.md) -chapter. - -## What means are available to monitor status - -All of the components of _ArangoSync_ provide means to monitor their status. -Below you'll find an overview per component. - -- Sync master & workers: The `arangosync` servers running as either master - or worker, provide: - - A status API, see `arangosync get status`. Make sure that all statuses report `running`. -
For even more detail, the following commands are also available: - `arangosync get tasks`, `arangosync get masters` & `arangosync get workers`. - - A log on the standard output. Log levels can be configured using `--log.level` settings. - - A metrics API `GET /metrics`. This API is compatible with Prometheus. - Sample Grafana dashboards for inspecting these metrics are available. - -- ArangoDB cluster: The `arangod` servers that make up the ArangoDB cluster - provide: - - A log file. This is configurable with settings with a `log.` prefix. - E.g. `--log.output=file://myLogFile` or `--log.level=info`. - - A statistics API `GET /_admin/statistics` - -- Kafka cluster: The kafka brokers provide: - - A log file, see settings with `log.` prefix in its `server.properties` configuration file. - -- Zookeeper: The zookeeper agents provide: - - A log on standard output. - -## What to look for while monitoring status - -The very first thing to do when monitoring the status of ArangoSync is to -look into the status provided by `arangosync get status ... -v`. -When not everything is in the `running` state (on both datacenters), this is an -indication that something may be wrong. In case that happens, give it some time -(incremental synchronization may take quite some time for large collections) -and look at the status again. If the statuses do not change (or change, but not reach `running`) -it is time to inspect the metrics & log files.
When the metrics or logs seem to indicate a problem in a sync master or worker, it is -safe to restart it, as long as only 1 instance is restarted at a time. -Give restarted instances some time to "catch up". - -## What to do when problems remain - -When a problem remains and restarting masters/workers does not solve the problem, -contact support. Make sure to include provide support with the following information: - -- Output of `arangosync get version ...` on both datacenters. -- Output of `arangosync get status ... -v` on both datacenters. -- Output of `arangosync get tasks ... -v` on both datacenters. -- Output of `arangosync get masters ... -v` on both datacenters. -- Output of `arangosync get workers ... -v` on both datacenters. -- Log files of all components -- A complete description of the problem you observed and what you did to resolve it. - -- How to monitor status of ArangoSync -- How to keep it alive -- What to do in case of failures or bugs - -## What to do when a source datacenter is down - -When you use ArangoSync for backup of your cluster from one datacenter -to another and the source datacenter has a complete outage, you may consider -switching your applications to the target (backup) datacenter. - -This is what you must do in that case: - -1. [Stop synchronization](../../Administration/DC2DC/README.md#stopping-synchronization) using: - - ```bash - arangosync stop sync ... - ``` - When the source datacenter is completely unresponsive this will not succeed. - In that case use: - - ```bash - arangosync abort sync ... - ``` - - See [Stopping synchronization](../../Administration/DC2DC/README.md#stopping-synchronization) - for how to cleanup the source datacenter when it becomes available again. - -2. Verify that configuration has completely stopped using: - ```bash - arangosync get status ... -v - ``` - -3. Reconfigure your applications to use the target (backup) datacenter. - -When the original source datacenter is restored, you may switch roles and -make it the target datacenter. To do so, use `arangosync configure sync ...` -as described in [Reversing synchronization direction](../../Administration/DC2DC/README.md#reversing-synchronization-direction). - -## What to do in case of a planned network outage - -All ArangoSync tasks send out heartbeat messages out to the other datacenter -to indicate "it is still alive". The other datacenter assumes the connection is -"out of sync" when it does not receive any messages for a certain period of time. - -If you're planning some sort of maintenance where you know the connectivity -will be lost for some time (e.g. 3 hours), you can prepare ArangoSync for that -such that it will hold off re-synchronization for a given period of time. - -To do so, on both datacenters, run: - -```bash -arangosync set message timeout \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - 3h -``` - -The last argument is the period that ArangoSync should hold-off resynchronization for. -This can be minutes (e.g. `15m`) or hours (e.g. `3h`). - -If maintenance is taking longer than expected, you can use the same command the extend -the hold-off period (e.g. to `4h`). - -After the maintenance, use the same command restore the hold-off period to its -default of `1h`. - -## What to do in case of a document that exceeds the message queue limits - -If you insert/update a document in a collection and the size of that document -is larger than the maximum message size of your message queue, the collection -will no longer be able to synchronize. 
It will go into a `failed` state. - -To recover from that, first remove the document from the ArangoDB cluster -in the source datacenter. After that, for each failed shard, run: - -```bash -arangosync reset failed shard \ - --master.endpoint= \ - --auth.user= \ - --auth.password= \ - --database= \ - --collection= \ - --shard= -``` - -After this command, a new set of tasks will be started to synchronize the shard. -It can take some time for the shard to reach `running` state. diff --git a/Documentation/Books/Manual/Troubleshooting/EmergencyConsole.md b/Documentation/Books/Manual/Troubleshooting/EmergencyConsole.md deleted file mode 100644 index c6bc4507a86f..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/EmergencyConsole.md +++ /dev/null @@ -1,45 +0,0 @@ -Emergency Console -================= - -The ArangoDB database server has two modes of operation: As a server, where it -will answer to client requests and as an emergency console, in which you can -access the database directly. The latter - as the name suggests - should -only be used in case of an emergency, for example, a corrupted -collection. Using the emergency console allows you to issue all commands -normally available in actions and transactions. When starting the server in -emergency console mode, the server cannot handle any client requests. - -You should never start more than one server using the same database directory, -independent of the mode of operation. Normally, ArangoDB will prevent -you from doing this by placing a lockfile in the database directory and -not allowing a second ArangoDB instance to use the same database directory -if a lockfile is already present. - -In Case Of Disaster -------------------- - -The following command starts an emergency console. - -**Note**: Never start the emergency console for a database which also has a -server attached to it. In general, the ArangoDB shell is what you want. - -``` -> ./arangod --console --log error /tmp/vocbase -ArangoDB shell [V8 version 5.0.71.39, DB version 3.x.x] - -arango> 1 + 2; -3 - -arango> var db = require("@arangodb").db; db.geo.count(); -703 - -``` - -The emergency console provides a JavaScript console directly running in the -arangod server process. This allows to debug and examine collections and -documents as with the normal ArangoDB shell, but without client/server -communication. - -However, it is very likely that you will never need the emergency console -unless you are an ArangoDB developer. 
- diff --git a/Documentation/Books/Manual/Troubleshooting/README.md b/Documentation/Books/Manual/Troubleshooting/README.md deleted file mode 100644 index 1599c76e783d..000000000000 --- a/Documentation/Books/Manual/Troubleshooting/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Troubleshooting -=============== - -- [ArangoDB Server (`arangod`)](Arangod.md) - -- [Emergency Console](EmergencyConsole.md) - -- [Cluster](Cluster/README.md) - -- [Datacenter to datacenter replication](DC2DC/README.md) diff --git a/Documentation/Books/Manual/Tutorials/DC2DC/README.md b/Documentation/Books/Manual/Tutorials/DC2DC/README.md deleted file mode 100644 index a8d187506521..000000000000 --- a/Documentation/Books/Manual/Tutorials/DC2DC/README.md +++ /dev/null @@ -1,361 +0,0 @@ - -# Datacenter to datacenter Replication - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -## About - -At some point in the growth of a database, there comes a need for -replicating it across multiple datacenters. - -Reasons for that can be: - -- Fallback in case of a disaster in one datacenter. -- Regional availability -- Separation of concerns - -And many more. - -This tutorial describes what the ArangoSync datacenter to datacenter -replication solution (ArangoSync from now on) offers, -when to use it, when not to use it and how to configure, -operate, troubleshoot it & keep it safe. - -### What is it - -ArangoSync is a solution that enables you to asynchronously replicate -the entire structure and content in an ArangoDB cluster in one place to a cluster -in another place. Typically it is used from one datacenter to another. -
It is not a solution for replicating single server instances. - -The replication done by ArangoSync is **asynchronous**. This means that when -a client is writing data into the source datacenter, it will consider the -request finished before the data has been replicated to the other datacenter. -The time needed to completely replicate changes to the other datacenter is -typically in the order of seconds, but this can vary significantly depending on -load, network & computer capacity. - -ArangoSync performs replication in a **single direction** only. That means that -you can replicate data from cluster A to cluster B or from cluster B to cluster A, -but never at the same time. -
Data modified in the destination cluster **will be lost!** - -Replication is a completely **autonomous** process. Once it is configured, it is -designed to run 24/7 without frequent manual intervention.
This does not mean that it requires no maintenance or attention at all. -
As with any distributed system, some attention is needed to monitor its operation -and keep it secure (e.g. certificate & password rotation). - -Once configured, ArangoSync will replicate both **structure and data** of an -**entire cluster**. This means that there is no need to make additional configuration -changes when adding/removing databases or collections.
Also meta data such as users, Foxx applications & jobs are automatically replicated. - -### When to use it... and when not - -ArangoSync is a good solution in all cases where you want to replicate -data from one cluster to another without the requirement that the data -is available immediately in the other cluster. - -ArangoSync is not a good solution when one of the following applies: - -- You want to replicate data from cluster A to cluster B and from cluster B - to cluster A at the same time. -- You need synchronous replication between 2 clusters. -- There is no network connection between cluster A and B. -- You want complete control over which database, collection & documents are replicated and which are not. - -## Requirements - -To use ArangoSync you need the following: - -- Two datacenters, each running an ArangoDB Enterprise Edition cluster, - version 3.3 or higher, using the RocksDB storage engine. -- A network connection between both datacenters with accessible endpoints - for several components (see individual components for details). -- TLS certificates for ArangoSync master instances (can be self-signed). -- TLS certificates for Kafka brokers (can be self-signed). -- Optional (but recommended) TLS certificates for ArangoDB clusters (can be self-signed). -- A client certificate CA for ArangoSync masters (typically self-signed). -- Client certificates for ArangoSync masters (typically self-signed). -- At least 2 instances of the ArangoSync master in each datacenter. -- One instance of the ArangoSync worker on every machine in each datacenter. - -Note: In several places you will need a (x509) certificate.
The [certificates](#certificates) section below provides more guidance for creating -and renewing these certificates. - -Besides the above list, you probably want to use the following: - -- An orchestrator to keep all components running. In this tutorial we will use `systemd` as an example. -- A log file collector for centralized collection & access to the logs of all components. -- A metrics collector & viewing solution such as Prometheus + Grafana. - -## Deployment - -In the following paragraphs you'll learn which components have to be deployed -for datacenter to datacenter replication using the `direct` message queue. -For detailed deployment instructions or instructions for the `kafka` message queue, -consult the [reference manual](../../Deployment/DC2DC/README.md). - -### ArangoDB cluster - -Datacenter to datacenter replication requires an ArangoDB cluster in both data centers, -configured with the `rocksdb` storage engine. - -Since the cluster agents are so critical to the availability of both the ArangoDB and the ArangoSync cluster, -it is recommended to run agents on dedicated machines. Consider these machines "pets". - -Coordinators and DBServers can be deployed on other machines that should be considered "cattle". - -### Sync Master - -The Sync Master is responsible for managing all synchronization, creating tasks and assigning -those to workers. -
At least 2 instances must be deployed in each datacenter.
One instance will be the "leader", the other will be an inactive slave. When the leader
is gone for a short while, one of the other instances will take over.

For clusters of significant size, the sync master requires a significant amount of resources.
Therefore, it is recommended to deploy sync masters on their own servers, equipped with sufficient
CPU power and memory capacity.

The sync master must be reachable on TCP port 8629 (default).
This port must be reachable from inside the datacenter (by sync workers and operations)
and from inside the other datacenter (by sync masters in the other datacenter).

Since the sync masters can be CPU intensive when running lots of databases & collections,
it is recommended to run them on dedicated machines with a lot of CPU power.

Consider these machines "pets".

### Sync Workers

The Sync Worker is responsible for executing synchronization tasks.
For optimal performance, at least 1 worker instance must be placed on
every machine that has an ArangoDB DBServer running. This ensures that tasks
can be executed with minimal network traffic outside of the machine.

Since sync workers will automatically stop once their TLS server certificate expires
(which is set to 2 years by default),
it is recommended to run at least 2 instances of a worker on every machine in the datacenter.
That way, tasks can still be assigned in the most optimal way, even when a worker is temporarily
down for a restart.

The sync worker must be reachable on TCP port 8729 (default).
This port must be reachable from inside the datacenter (by sync masters).

The sync workers should be run on all machines that also contain an ArangoDB DBServer.
The sync worker can be memory intensive when running lots of databases & collections.

Consider these machines "cattle".

### Prometheus & Grafana (optional)

ArangoSync provides metrics in a format supported by [Prometheus](https://prometheus.io).
We also provide a standard set of dashboards for viewing those metrics in [Grafana](https://grafana.org).

If you want to use these tools, go to their websites for instructions on how to deploy them.

After deployment, you must configure Prometheus using a configuration file that instructs
it about which targets to scrape. For ArangoSync you should configure scrape targets for
all sync masters and all sync workers.
Consult the [reference manual](../../Deployment/DC2DC/PrometheusGrafana.md) for a sample configuration.

Prometheus can be a memory & CPU intensive process. It is recommended to keep it
on machines other than those used to run the ArangoDB cluster or ArangoSync components.

Consider these machines "cattle", unless you configure alerting on Prometheus,
in which case it is recommended to consider these machines "pets".

## Configuration

Once all components of the ArangoSync solution have been deployed and are
running properly, ArangoSync will not automatically replicate database structure
and content. For that, you have to configure synchronization explicitly.

To configure synchronization, you need the following:

- The endpoint of the sync master in the target datacenter.
- The endpoint of the sync master in the source datacenter.
- A certificate (in keyfile format) used for client authentication of the sync master
  (with the sync master in the source datacenter).
- A CA certificate (public key only) for verifying the integrity of the sync masters.
- A username+password pair (or client certificate) for authenticating the configure
  request with the sync master (in the target datacenter).

With that information, run:

```bash
arangosync configure sync \
    --master.endpoint= \
    --master.keyfile= \
    --source.endpoint= \
    --source.cacert= \
    --auth.user= \
    --auth.password=
```

The command will finish quickly. Afterwards it will take some time until
the clusters in both datacenters are in sync.

Use the following command to inspect the status of the synchronization of a datacenter:

```bash
arangosync get status \
    --master.endpoint= \
    --auth.user= \
    --auth.password= \
    -v
```

Note: Invoking this command on the target datacenter will return different results from
invoking it on the source datacenter. You need insight into both results to get a "complete picture".

ArangoSync has more commands to inspect the status of synchronization.
Consult the [reference manual](../../Administration/DC2DC/README.md#inspect-status) for details.

### Stop synchronization

If you no longer want to synchronize data from a source to a target datacenter,
you must stop it. To do so, run the following command:

```bash
arangosync stop sync \
    --master.endpoint= \
    --auth.user= \
    --auth.password=
```

The command will wait until synchronization has completely stopped before returning.
If the synchronization is not completely stopped within a reasonable period (2 minutes by default),
the command will fail.

If the source datacenter is no longer available, it is not possible to stop synchronization in
a graceful manner. Consult the [reference manual](../../Administration/DC2DC/README.md#stopping-synchronization)
for instructions on how to abort synchronization in this case.

### Reversing synchronization direction

If you want to reverse the direction of synchronization (e.g. after a failure
in datacenter A, when you have switched to datacenter B as a fallback), you
must first stop (or abort) the original synchronization.

Once that is finished (and cleanup has been applied in case of abort),
you must configure the synchronization again, but with swapped
source & target settings.

## Operations & Maintenance

ArangoSync is a distributed system with a lot of different components.
As with any such system, it requires some operational support, though not a lot.

### What means are available to monitor status

All of the components of ArangoSync provide means to monitor their status.
Below you'll find an overview per component.

- Sync master & workers: The `arangosync` servers, running as either master
  or worker, provide:
  - A status API, see `arangosync get status`. Make sure that all statuses report `running`.
    For even more detail, the following commands are also available:
    `arangosync get tasks`, `arangosync get masters` & `arangosync get workers`.
  - A log on the standard output. Log levels can be configured using the `--log.level` settings.
  - A metrics API `GET /metrics`. This API is compatible with Prometheus.
    Sample Grafana dashboards for inspecting these metrics are available.

- ArangoDB cluster: The `arangod` servers that make up the ArangoDB cluster
  provide:
  - A log file. This is configurable via the settings with a `log.` prefix,
    e.g. `--log.output=file://myLogFile` or `--log.level=info`.
  - A statistics API `GET /_admin/statistics`

### What to look for while monitoring status

The very first thing to do when monitoring the status of ArangoSync is to
look into the status provided by `arangosync get status ... -v`.
When not everything is in the `running` state (in both datacenters), this is an
indication that something may be wrong. In case that happens, give it some time
(incremental synchronization may take quite some time for large collections)
and look at the status again. If the statuses do not change (or change, but do not reach `running`),
it is time to inspect the metrics & log files.
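As a concrete sketch of such an inspection, the raw metrics of a sync master can be fetched
over HTTPS, assuming a monitoring token was configured via `--monitoring.token` (see the
Metrics section below); the hostname, port and token used here are placeholders.

```bash
# Placeholder host and token; the /metrics endpoint and the bearer token
# authentication are described in the Metrics section below.
MONITORING_TOKEN="my-monitoring-token"
curl -k -H "Authorization: Bearer ${MONITORING_TOKEN}" \
    https://syncmaster1.example.com:8629/metrics
```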
When the metrics or logs seem to indicate a problem in a sync master or worker, it is
safe to restart it, as long as only 1 instance is restarted at a time.
Give restarted instances some time to "catch up".

### 'What if ...'

Please consult the [reference manual](../../Troubleshooting/DC2DC/README.md)
for detailed descriptions of what to do in case of certain problems, and of how and
what information to provide to support so they can assist you best when needed.

### Metrics

The ArangoSync master & worker provide metrics that can be used for monitoring the ArangoSync
solution. These metrics are available using the following HTTPS endpoints:

- GET `/metrics`: Provides metrics in a format supported by Prometheus.
- GET `/metrics.json`: Provides the same metrics in JSON format.

Both endpoints include help information per metric.

Note: Both endpoints require authentication. Besides the usual authentication methods,
these endpoints are also accessible using a special bearer token specified with the `--monitoring.token`
command line option.

Consult the [reference manual](../../Monitoring/DC2DC/README.md#metrics)
for sample output of the metrics endpoints.

## Security

### Firewall settings

The components of ArangoSync use (TCP) network connections to communicate with each other.

Consult the [reference manual](../../Security/DC2DC/README.md#firewall-settings)
for a detailed list of connections and the ports that should be accessible.

### Certificates

Digital certificates are used in many places in ArangoSync for both encryption
and authentication.

In ArangoSync, all network connections use Transport Layer Security (TLS),
a set of protocols that ensures that all network traffic is encrypted.
For this, TLS certificates are used. The server side of the network connection
offers a TLS certificate. This certificate is (often) verified by the client side of the network
connection, to ensure that the certificate is signed by a trusted Certificate Authority (CA).
This ensures the integrity of the server.

In several places, additional certificates are used for authentication. In those cases,
the client side of the connection offers a client certificate (on top of an existing TLS connection).
The server side of the connection uses the client certificate to authenticate
the client and (optionally) decide which rights should be assigned to the client.

Note: ArangoSync does allow the use of certificates signed by a well-known CA (e.g. Verisign);
however, it is more convenient (and common) to use your own CA.

Consult the [reference manual](../../Security/DC2DC/README.md#certificates)
for detailed instructions on how to create these certificates.

#### Renewing certificates

All certificates have meta information in them that limits their use in function,
target & lifetime.
A certificate created for client authentication (function) cannot be used as a TLS -server certificate (same is true for the reverse). -
A certificate for host `myserver` (target) cannot be used for host `anotherserver`. -
A certificate that is valid until October 2017 (lifetime) cannot be used after October 2017.

If anything changes in function, target or lifetime, you need a new certificate.

The procedure for creating a renewed certificate is the same as for creating a "first" certificate.
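As an illustration, renewing the TLS server certificate (keyfile) of a sync master could look
roughly like the sketch below. The `arangosync create tls keyfile` option names and file names
are assumptions based on the tooling described in the reference manual linked above, so verify
them there before use.

```bash
# Sketch only: option names and file names are assumed and may differ
# between versions; consult the reference manual for the exact syntax.
arangosync create tls keyfile \
    --cacert=my-tls-ca.crt \
    --cakey=my-tls-ca.key \
    --keyfile=my-tls-keyfile.keyfile \
    --host=mysyncmaster.example.com
```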
After creating the renewed certificate the process(es) using them have to be updated. -This mean restarting them. All ArangoSync components are designed to support stopping and starting -single instances, but do not restart more than 1 instance at the same time. -As soon as 1 instance has been restarted, give it some time to "catch up" before restarting -the next instance. diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/AKS.md b/Documentation/Books/Manual/Tutorials/Kubernetes/AKS.md deleted file mode 100644 index 922fda6fdb60..000000000000 --- a/Documentation/Books/Manual/Tutorials/Kubernetes/AKS.md +++ /dev/null @@ -1,105 +0,0 @@ - -# Start ArangoDB on Azure Kubernetes Service (AKS) - -## Requirements - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (**version >= 1.10**) -* [helm](https://www.helm.sh/) -* [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-apt?view=azure-cli-latest) - -## Deploy cluster - -* In Azure dashboard choose **Create a resource** -* Choose **Kubernetes Service** - -## Cluster basics (version >=1.10) - -![basics](./aks-create-basics.png) - -## Cluster authentication (Enable RBAC) - -![basics](./aks-create-auth.png) - -## Wait for cluster to be created - -![basics](./aks-create-valid.png) - -## Move to control using `kubectl` - -- Login to Azure using CLI - - ``` - $ az login - [ - { - "cloudName": "AzureCloud", - "id": "...", - "isDefault": true, - "name": "ArangoDB-INC", - "state": "Enabled", - "tenantId": "...", - "user": { - "name": "xxx@arangodb.com", - "type": "user" - } - } - ] - ``` - -- Get AKS credentials to merge with local config, using resource group and - cluster names used for above deployment - - ``` - $ az aks get-credentials --resource-group clifton --name ArangoDB - ``` - -- Verify successful merge - - ``` - $ kubectl get svc - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.1 443/TCP 38m - ``` - -- Initialize `helm` - - ``` - $ kubectl create serviceaccount --namespace kube-system tiller - serviceaccount/tiller created - ``` - - ``` - $ kubectl create clusterrolebinding tiller-cluster-rule \ - --clusterrole=cluster-admin --serviceaccount=kube-system:tiller - clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created - ``` - - ``` - $ helm init --service-account tiller - $HELM_HOME has been configured at /home/xxx/.helm. - ... - Happy Helming! - - Tiller (the Helm server-side component) has been - installed into your Kubernetes Cluster. - ``` - -- Deploy ArangoDB operator - - ``` - $ helm install \ - github.com/arangodb/kube-arangodb/releases/download/X.X.X/kube-arangodb.tgz - NAME: orderly-hydra - LAST DEPLOYED: Wed Oct 31 15:11:37 2018 - NAMESPACE: default - STATUS: DEPLOYED - ... - See https://docs.arangodb.com/devel/Manual/Tutorials/Kubernetes/ - for how to get started. - ``` - -- Deploy ArangoDB cluster - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/master/examples/simple-cluster.yaml - ``` diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/DC2DC.md b/Documentation/Books/Manual/Tutorials/Kubernetes/DC2DC.md deleted file mode 100644 index 77063f90cd09..000000000000 --- a/Documentation/Books/Manual/Tutorials/Kubernetes/DC2DC.md +++ /dev/null @@ -1,143 +0,0 @@ - -# Start ArangoDB Cluster to Cluster Synchronization on Kubernetes - -This tutorial guides you through the steps needed to configure -an ArangoDB datacenter to datacenter replication between two ArangoDB -clusters running in Kubernetes. 
- -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -## Requirements - -1. This tutorial assumes that you have 2 ArangoDB clusters running in 2 different Kubernetes clusters. -1. Both Kubernetes clusters are equipped with support for `Services` of type `LoadBalancer`. -1. You can create (global) DNS names for configured `Services` with low propagation times. E.g. use Cloudflare. -1. You have 4 DNS names available: - - One for the database in the source ArangoDB cluster. E.g. `src-db.mycompany.com` - - One for the ArangoDB syncmasters in the source ArangoDB cluster. E.g. `src-sync.mycompany.com` - - One for the database in the destination ArangoDB cluster. E.g. `dst-db.mycompany.com` - - One for the ArangoDB syncmasters in the destination ArangoDB cluster. E.g. `dst-sync.mycompany.com` - -## Step 1: Enable Datacenter Replication Support on source ArangoDB cluster - -Set your current Kubernetes context to the Kubernetes source cluster. - -Edit the `ArangoDeployment` of the source ArangoDB clusters. - -Set: - -- `spec.tls.altNames` to `["src-db.mycompany.com"]` (can include more names / IP addresses) -- `spec.sync.enabled` to `true` -- `spec.sync.externalAccess.masterEndpoint` to `["https://src-sync.mycompany.com:8629"]` -- `spec.sync.externalAccess.accessPackageSecretNames` to `["src-accesspackage"]` - -## Step 2: Extract access-package from source ArangoDB cluster - -Run: - -```bash -kubectl get secret src-accesspackage --template='{{index .data "accessPackage.yaml"}}' | \ - base64 -D > accessPackage.yaml -``` - -## Step 3: Configure source DNS names - -Run: - -```bash -kubectl get service -``` - -Find the IP address contained in the `LoadBalancer` column for the following `Services`: - -- `-ea` Use this IP address for the `src-db.mycompany.com` DNS name. -- `-sync` Use this IP address for the `src-sync.mycompany.com` DNS name. - -The process for configuring DNS names is specific to each DNS provider. - -## Step 4: Enable Datacenter Replication Support on destination ArangoDB cluster - -Set your current Kubernetes context to the Kubernetes destination cluster. - -Edit the `ArangoDeployment` of the source ArangoDB clusters. - -Set: - -- `spec.tls.altNames` to `["dst-db.mycompany.com"]` (can include more names / IP addresses) -- `spec.sync.enabled` to `true` -- `spec.sync.externalAccess.masterEndpoint` to `["https://dst-sync.mycompany.com:8629"]` - -## Step 5: Import access package in destination cluster - -Run: - -```bash -kubectl apply -f accessPackage.yaml -``` - -Note: This imports two `Secrets`, containing TLS information about the source cluster, -into the destination cluster - -## Step 6: Configure destination DNS names - -Run: - -```bash -kubectl get service -``` - -Find the IP address contained in the `LoadBalancer` column for the following `Services`: - -- `-ea` Use this IP address for the `dst-db.mycompany.com` DNS name. -- `-sync` Use this IP address for the `dst-sync.mycompany.com` DNS name. - -The process for configuring DNS names is specific to each DNS provider. - -## Step 7: Create an `ArangoDeploymentReplication` resource - -Create a yaml file (e.g. 
called `src-to-dst-repl.yaml`) with the following content: - -```yaml -apiVersion: "replication.database.arangodb.com/v1alpha" -kind: "ArangoDeploymentReplication" -metadata: - name: "replication-src-to-dst" -spec: - source: - masterEndpoint: ["https://src-sync.mycompany.com:8629"] - auth: - keyfileSecretName: src-accesspackage-auth - tls: - caSecretName: src-accesspackage-ca - destination: - deploymentName: -``` - -## Step 8: Wait for DNS names to propagate - -Wait until the DNS names configured in step 3 and 6 resolve to their configured -IP addresses. - -Depending on your DNS provides this can take a few minutes up to 24 hours. - -## Step 9: Activate replication - -Run: - -```bash -kubectl apply -f src-to-dst-repl.yaml -``` - -Replication from the source cluster to the destination cluster will now be configured. - -Check the status of the replication by inspecting the status of the `ArangoDeploymentReplication` resource using: - -```bash -kubectl describe ArangoDeploymentReplication replication-src-to-dst -``` - -As soon as the replication is configured, the `Add collection` button in the `Collections` -page of the web UI (of the destination cluster) will be grayed out. diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/EKS.md b/Documentation/Books/Manual/Tutorials/Kubernetes/EKS.md deleted file mode 100644 index d9df4ae3e090..000000000000 --- a/Documentation/Books/Manual/Tutorials/Kubernetes/EKS.md +++ /dev/null @@ -1,188 +0,0 @@ - -# Start ArangoDB on Amazon Elastic Kubernetes Service (EKS) - -## Requirements: - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (**version >= 1.10**) -* [helm](https://www.helm.sh/) -* [AWS IAM authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) -* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) (**version >= 1.16**) - -``` -$ aws --version - aws-cli/1.16.43 Python/2.7.15rc1 Linux/4.15.0-36-generic botocore/1.12.33 -``` - -## Create a Kubernetes cluster - -![clusters](eks-clusters.png) - -## Wait for cluster to be `ACTIVE` -![cluster-active](eks-cluster-active.png) - -## Continue with aws client - -### Configure AWS client - -Refer to the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) -to accordingly fill in the below with your credentials. -Pay special attention to the correct region information to find your cluster next. - -``` -$ aws configure - AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE - AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - Default region name [None]: us-west-2 - Default output format [None]: json -``` - -Verify that you can see your cluster listed, when authenticated -``` -$ aws eks list-clusters -{ - "clusters": [ - "ArangoDB" - ] -} -``` - -You should be able to verify the `ACTIVE` state of your cluster -``` -$ aws eks describe-cluster --name ArangoDB --query cluster.status - "ACTIVE" -``` - -### Integrate kubernetes configuration locally - -It's time to integrate the cluster into your local kubernetes configurations - -``` -$ aws eks update-kubeconfig --name ArangoDB - Added new context arn:aws:eks:us-west-2:XXXXXXXXXXX:cluster/ArangoDB to ... - -``` - -At this point, we are ready to use kubectl to communicate with the cluster. -``` -$ kubectl get service - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.100.0.1 443/TCP 23h -``` - -``` -$ kubectl get nodes - No resources found. 
-``` - -### Create worker Stack - -On Amazon EKS, we need to launch worker nodes, as the cluster has none. -Open Amazon's [cloud formation console](https://console.aws.amazon.com/cloudformation/) -and choose `Create Stack` by specifying this S3 template URL: - -``` -https://amazon-eks.s3-us-west-2.amazonaws.com/cloudformation/2018-08-30/amazon-eks-nodegroup.yaml -``` - -![formation-template](eks-create-template.png) - -### Worker stack details - -Pay good attention to details here. If your input is not complete, your worker -nodes are either not spawned or you won't be able to integrate the workers -into your kubernetes cluster. - -**Stack name**: Choose a name for your stack. For example ArangoDB-stack - -**ClusterName**: **Important!!!** Use the same name as above, refer to `aws eks list-clusters`. - -**ClusterControlPlaneSecurityGroup**: Choose the same SecurityGroups value as above, when you create your EKS Cluster. - -**NodeGroupName**: Enter a name for your node group for example `ArangoDB-node-group` - -**NodeAutoScalingGroupMinSize**: Minimum number of nodes to which you may scale your workers. - -**NodeAutoScalingGroupMaxSize**: Nomen est omen. - -**NodeInstanceType**: Choose an instance type for your worker nodes. For this test we went with the default `t2.medium` instances. - -**NodeImageId**: Dependent on the region, there are two image Ids for boxes with and without GPU support. - -| Region | without GPU | with GPU | -|-----------|-----------------------|-----------------------| -| us-west-2 | ami-0a54c984b9f908c81 | ami-0440e4f6b9713faf6 | -| us-east-1 | ami-0440e4f6b9713faf6 | ami-058bfb8c236caae89 | -| eu-west-1 | ami-0c7a4976cb6fafd3a | ami-0706dc8a5eed2eed9 | - -**KeyName**: SSH key pair, which may be used to ssh into the nodes. This is required input. - -**VpcId**: The same VPCId, which you get using `aws eks describe-cluster --name --query cluster.resourcesVpcConfig.vpcId` - -**Subnets**: Choose the subnets that you created in Create your Amazon EKS Cluster VPC. 
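If you prefer the command line over the console for looking up these values, the VPC and
subnet IDs assigned to the EKS cluster can be read back with the AWS CLI; the cluster name
below is the one used earlier in this tutorial.

```bash
# Look up the VPC and the subnets of the EKS cluster, to fill in the
# VpcId and Subnets fields of the worker stack.
aws eks describe-cluster --name ArangoDB --query cluster.resourcesVpcConfig.vpcId
aws eks describe-cluster --name ArangoDB --query cluster.resourcesVpcConfig.subnetIds
```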
- -### Review your stack and submit -![create-review](eks-create-review.png) - -### Wait for stack to get ready -![eks-stack](eks-stack.png) - -### Note down `NodeInstanceRole` -Once stack is ready, navigate at the bottom to the Outputs pane and note down the `NodeInstanceRole` -![eks-stack](eks-stack-ready.png) - -### Integrate worker stack as Kubernetes nodes - -* Download the configuration map here: -``` -$ curl -O https://amazon-eks.s3-us-west-2.amazonaws.com/cloudformation/2018-08-30/aws-auth-cm.yaml -``` -* Modify `data|mapRoles|rolearn` to match the `NoteInstanceRole`, you acquired after your node stack was finished - -* Deploy node integration -``` -$ kubectl apply -f aws-auth-cm.yaml -``` - -### Wait for nodes to join the cluster and get ready -Monitor `kubectl get nodes` and watch your nodes to be ready -``` -$ kubectl get nodes - NAME STATUS ROLES AGE VERSION - ip-172-31-20-103.us-west-2.compute.internal Ready 1d v1.10.3 - ip-172-31-38-160.us-west-2.compute.internal Ready 1d v1.10.3 - ip-172-31-45-199.us-west-2.compute.internal Ready 1d v1.10.3 -``` - -### Setup `helm` -* Create service account for `tiller` -``` -$ kubectl create serviceaccount --namespace kube-system tiller - serviceaccount/tiller created -``` -* Allow `tiller` to modify the cluster -``` -$ kubectl create clusterrolebinding tiller-cluster-rule \ - --clusterrole=cluster-admin --serviceaccount=kube-system:tiller - clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created -``` -* Initialize `helm` -``` -$ helm init --service-account tiller - $HELM_HOME has been configured at ~/.helm. - ... - Happy Helming! -``` - -### Deploy ArangoDB cluster -``` -$ kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/master/examples/simple-cluster.yaml -``` - -### Wait for cluster to become ready -Get `LoadBalancer` address from below command to access your coordinator. -``` -$ kubectl get svc -``` - -### Secure ArangoDB cluster -Do not forget to immediately assign a secure database `root` password once on coordinator diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/GKE.md b/Documentation/Books/Manual/Tutorials/Kubernetes/GKE.md deleted file mode 100644 index 300cec388671..000000000000 --- a/Documentation/Books/Manual/Tutorials/Kubernetes/GKE.md +++ /dev/null @@ -1,120 +0,0 @@ - -# Start ArangoDB on Google Kubernetes Engine (GKE) - -In this guide you'll learn how to run ArangoDB on Google Kubernetes Engine (GKE). - -## Create a Kubernetes cluster - -In order to run ArangoDB on GKE you first need to create a Kubernetes cluster. - -To do so, go to the GKE console. -You'll find a list of existing clusters (initially empty). - -![clusters](./gke-clusters.png) - -Click on `CREATE CLUSTER`. - -In the form that follows, enter information as seen in the screenshot below. - -![create a cluster](./gke-create-cluster.png) - -We have successfully ran clusters with 4 `1 vCPU` nodes or 3 `2 vCPU` nodes. -Smaller node configurations will likely lead to unschedulable `Pods`. - -Once you click `Create`, you'll return to the list of clusters and your -new cluster will be listed there. - -![with new cluster](./gke-clusters-added.png) - -It will take a few minutes for the cluster to be created. - -Once you're cluster is ready, a `Connect` button will appear in the list. - -![cluster is ready](./gke-clusters-ready.png) - -## Getting access to your Kubernetes cluster - -Once your cluster is ready you must get access to it. 
-The standard `Connect` button provided by GKE will give you access with only limited -permissions. Since the Kubernetes operator also requires some cluster wide -permissions, you need "administrator" permissions. - -To get these permissions, do the following. - -Prepare your `~/.kube/config` with the following content: - -```yaml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: - server: https:// - name: my_cluster -contexts: -- context: - cluster: my_cluster - user: my_cluster - name: my_cluster -current-context: my_cluster -kind: Config -preferences: {} -users: -- name: my_cluster - user: - username: admin - password: -``` - -Click on the `Edit` button (pencil image) next to your cluster. -This will lead you to the following page. - -![edit cluster](./gke-edit-cluster.png) - -Copy the `Endpoint` IP address and paste it after `server: https://`. - -The click on `Show credentials`. -The following popup will appear. - -![show credentials](./gke-show-credentials.png) - -Copy the `Password` and paste it after `password:`. - -Close the popup and then return to the cluster list. - -Click on `Connect` next to your cluster. -The following popup will appear. - -![connect to cluster](./gke-connect-to-cluster.png) - -Click on `Run in Cloud Shell`. -It will take some time to launch a shell (in your browser). - -Once ready, run the `gcloud` command that is already prepare in your commandline. - -The run `cat ~/.kube/config` and copy the line that starts with `certificate-authority-data:`. -Override the corresponding line in your local `~/.kube.config` file. - -You should now be able to access your cluster using `kubectl`. - -To verify try a command like: - -```bash -kubectl get pods --all-namespaces -``` - -## Installing `kube-arangodb` - -You can now install the ArangoDB Kubernetes operator in your Kubernetes cluster -on GKE. - -To do so, follow the [Installing kube-arangodb](./README.md#installing-kube-arangodb) instructions. - -## Deploying your first ArangoDB database - -Once the ArangoDB Kubernetes operator has been installed and its `Pods` are in the `Ready` -state, you can launch your first ArangoDB deployment in your Kubernetes cluster -on GKE. - -To do so, follow the [Deploying your first ArangoDB database](./README.md#deploying-your-first-arangodb-database) instructions. - -Note that GKE supports `Services` of type `LoadBalancer`. diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/README.md b/Documentation/Books/Manual/Tutorials/Kubernetes/README.md deleted file mode 100644 index 56f7e6554399..000000000000 --- a/Documentation/Books/Manual/Tutorials/Kubernetes/README.md +++ /dev/null @@ -1,194 +0,0 @@ - -# Start ArangoDB on Kubernetes in 5 minutes - -Starting an ArangoDB database (either single server or full blown cluster) -on Kubernetes involves a lot of resources. - -The servers needs to run in `Pods`, you need `Secrets` for authentication, -TLS certificates and `Services` to enable communication with the database. - -Use `kube-arangodb`, the ArangoDB Kubernetes Operator to greatly simplify -this process. - -In this guide, we will explain what the ArangoDB Kubernetes Operator is, -how to install it and how use it to deploy your first ArangoDB database -in a Kubernetes cluster. - -First, you obviously need a Kubernetes cluster and the right credentials -to access it. If you already have this, you can immediately skip to the -next section. 
Since different cloud providers differ slightly in their -Kubernetes offering, we have put together detailed tutorials for those -platforms we officially support, follow the link for detailed setup -instructions: - - - [Amazon Elastic Kubernetes Service (EKS)](EKS.md) - - [Google Kubernetes Engine (GKE)](GKE.md) - - [Microsoft Azure Kubernetes Service (AKS)](AKS.md) - -Note that in particular the details of Role Based Access Control (RBAC) -matter. - -## What is `kube-arangodb` - -`kube-arangodb` is a set of two operators that you deploy in your Kubernetes -cluster to (1) manage deployments of the ArangoDB database and (2) -provide `PersistentVolumes` on local storage of your nodes for optimal -storage performance. - -Note that the operator that provides `PersistentVolumes` is not needed to -run ArangoDB deployments. You can also use `PersistentVolumes` provided -by other controllers. - -In this guide we will focus on the `ArangoDeployment` operator. - -## Installing `kube-arangodb` - -To install `kube-arangodb` in your Kubernetes cluster, make sure -you have access to this cluster and the rights to deploy resources -at cluster level. - -For now, any recent Kubernetes cluster will do (e.g. `minikube`). - -Then run (replace `` with the version of the operator that you want to install): - -```bash -kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb//manifests/arango-crd.yaml -kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb//manifests/arango-deployment.yaml -# To use `ArangoLocalStorage`, also run -kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb//manifests/arango-storage.yaml -# To use `ArangoDeploymentReplication`, also run -kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb//manifests/arango-deployment-replication.yaml -``` - -The first command installs two `CustomResourceDefinitions` in your Kubernetes cluster: - -- `ArangoDeployment` is the resource used to deploy ArangoDB database. -- `ArangoDeploymentReplication` is the resource used to deploy ArangoDB DC2DC - replications. - -The second command installs a `Deployment` that runs the operator that controls -`ArangoDeployment` resources. - -The optional third command installs a `Deployment` that runs the operator that -provides `PersistentVolumes` on local disks of the cluster nodes. -Use this when running on bare-metal or if there is no provisioner for fast -storage in your Kubernetes cluster. Furthermore, this also installs a -new custom resource definition: - -- `ArangoLocalStorage` is the resource used to provision `PersistentVolumes` on local storage. - -The optioal fourth command installs a `Deployment` that runs the -operator that takes care of DC2DC replications. - -## Deploying your first ArangoDB database - -The first database we are going to deploy is a single server database. - -Create a file called `single-server.yaml` with the following content. - -```yaml -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "single-server" -spec: - mode: Single -``` - -Now insert this resource in your Kubernetes cluster using: - -```bash -kubectl apply -f single-server.yaml -``` - -The `ArangoDeployment` operator in `kube-arangodb` will now inspect the -resource you just deployed and start the process to run a single server database. 
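If you want to block until the deployment is actually up, one option is to wait for the pod to
become ready; the label selector below matches the one used further down in this tutorial, and
the timeout value is just an example.

```bash
# Wait (up to 5 minutes) for the single server pod to report Ready.
kubectl wait --for=condition=Ready pod \
    --selector=arango_deployment=single-server \
    --timeout=300s
```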
To inspect the current status of your deployment, run:

```bash
kubectl describe ArangoDeployment single-server
# or shorter
kubectl describe arango single-server
```

To inspect the pods created for this deployment, run:

```bash
kubectl get pods --selector=arango_deployment=single-server
```

The result will look similar to this:

```plain
NAME                                 READY     STATUS    RESTARTS   AGE
single-server-sngl-cjtdxrgl-fe06f0   1/1       Running   0          1m
```

Once the pod reports that it has a `Running` status and is ready,
your database is available.

## Connecting to your database

The single server database you deployed in the previous chapter is now
available from within the Kubernetes cluster as well as outside it.

Access to the database from outside the Kubernetes cluster is provided
using an external-access service.
By default, this service is of type `LoadBalancer`. If this type of service
is not supported by your Kubernetes cluster, it will be replaced by
a service of type `NodePort` after a minute.

To see the type of service that has been created, run:

```bash
kubectl get service single-server-ea
```

When the service is of the `LoadBalancer` type, use the IP address
listed in the `EXTERNAL-IP` column with port 8529.
When the service is of the `NodePort` type, use the IP address
of any of the nodes of the cluster, combined with the high (>30000) port listed in the `PORT(S)` column.

Now you can connect your browser to `https://:/`.

Your browser will show a warning about an unknown certificate.
Accept the certificate for now.

Then log in using username `root` and an empty password.

If you want to delete your single server ArangoDB database, just run:

```bash
kubectl delete ArangoDeployment single-server
```

## Deploying a full blown ArangoDB cluster database

The deployment of a full blown cluster is very similar to deploying
a single server database. The difference is in the `mode` field of
the `ArangoDeployment` specification.

Create a file called `cluster.yaml` with the following content.

```yaml
apiVersion: "database.arangodb.com/v1alpha"
kind: "ArangoDeployment"
metadata:
  name: "cluster"
spec:
  mode: Cluster
```

Now insert this resource in your Kubernetes cluster using:

```bash
kubectl apply -f cluster.yaml
```

The same commands used in the single server deployment can be used
to inspect your cluster. Just use the correct deployment name (`cluster` instead of `single-server`).
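For example, the status and the pods of the cluster deployment can be inspected with:

```bash
# Same inspection commands as before, now for the "cluster" deployment.
kubectl describe arango cluster
kubectl get pods --selector=arango_deployment=cluster
```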
- -## Where to go from here - -- [ArangoDB Kubernetes Operator](../../Deployment/Kubernetes/README.md) diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-auth.png b/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-auth.png deleted file mode 100644 index 0f6746f8bc60..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-auth.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-basics.png b/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-basics.png deleted file mode 100644 index e2b644500dfb..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-basics.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-valid.png b/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-valid.png deleted file mode 100644 index 9af3d6acb72f..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/aks-create-valid.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-cluster-active.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-cluster-active.png deleted file mode 100644 index 0f433bafa885..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-cluster-active.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-clusters.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-clusters.png deleted file mode 100644 index 006308ceab27..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-clusters.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-review.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-review.png deleted file mode 100644 index c4ff95be7393..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-review.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-template.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-template.png deleted file mode 100644 index 2a62b9ce9ef8..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-create-template.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack-ready.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack-ready.png deleted file mode 100644 index 511bef48fe4f..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack-ready.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack.png b/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack.png deleted file mode 100644 index 3cc192bb8ab8..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/eks-stack.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-added.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-added.png deleted file mode 100644 index 845a4449550e..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-added.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-ready.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-ready.png deleted file mode 100644 index 7ecf21fd81c1..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters-ready.png 
and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters.png deleted file mode 100644 index dd9a033bf51e..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-clusters.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-connect-to-cluster.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-connect-to-cluster.png deleted file mode 100644 index 80c824f509be..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-connect-to-cluster.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-create-cluster.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-create-cluster.png deleted file mode 100644 index 2108d598191f..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-create-cluster.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-edit-cluster.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-edit-cluster.png deleted file mode 100644 index cd503e53ca53..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-edit-cluster.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-show-credentials.png b/Documentation/Books/Manual/Tutorials/Kubernetes/gke-show-credentials.png deleted file mode 100644 index bf70bbd0f22f..000000000000 Binary files a/Documentation/Books/Manual/Tutorials/Kubernetes/gke-show-credentials.png and /dev/null differ diff --git a/Documentation/Books/Manual/Tutorials/README.md b/Documentation/Books/Manual/Tutorials/README.md deleted file mode 100644 index bdd336f53d72..000000000000 --- a/Documentation/Books/Manual/Tutorials/README.md +++ /dev/null @@ -1,69 +0,0 @@ -Tutorials -========= - -- [CRUD](https://www.arangodb.com/tutorials/arangodb-crud/): - Document CRUD (Create, Read, Update, Delete) with AQL and HTTP API - -- [AQL](../../AQL/Tutorial/index.html): - Introduction to ArangoDB's query language AQL - -- [Performance Course](https://www.arangodb.com/arangodb-performance-course/): - Performance Optimization Basics: How to make your queries faster - -- [Geo Queries](https://www.arangodb.com/using-arangodb-geo-index-cursor-via-aql/): - How to use the ArangoDB Geo Index Cursor via AQL - -- [Foxx](../Foxx/GettingStarted.md): - Create your first "Hello World!" 
Foxx service - -- [ArangoSearch](https://www.arangodb.com/tutorials/arangosearch/): - The implementation of ArangoDB's view engine - -Deployment & Administration ---------------------------- - -- [ArangoDB Starter](Starter/README.md): - Starting an ArangoDB Cluster or database the easy way - -- [Datacenter to datacenter Replication](DC2DC/README.md): - A tutorial about the _ArangoSync_ DC2DC solution - -- [Kubernetes](Kubernetes/README.md): - Start ArangoDB on Kubernetes in 5 minutes - -- [DC2DC on Kubernetes](Kubernetes/DC2DC.md): - Start DC2DC between two ArangoDB clusters running in Kubernetes - -- [Monitor ArangoDB](https://www.arangodb.com/tutorials/monitoring-collectd-prometheus-grafana/): - Set up a monitoring system for ArangoDB using _collectd_, _Prometheus_ and _Grafana_ - -- [Cluster Administration Course](https://www.arangodb.com/arangodb-cluster-course/): - Concepts, maintenance, resilience and troubleshooting - -Graphs ------- - -- [Graph Course](https://www.arangodb.com/arangodb-graph-course/): - Get started with ArangoDB's graph related features - -- [SmartGraphs](https://www.arangodb.com/using-smartgraphs-arangodb/): - Performance benefit for graphs sharded in an ArangoDB Cluster - -- [Pregel Community Detection](https://www.arangodb.com/pregel-community-detection/): - Find an underlying community structure in a network - -- [Smartifier](https://www.arangodb.com/arangodb-smartifier/): - Transforming an existing Graph dataset into a SmartGraph for Enterprise level scaling - -Languages and Integrations --------------------------- - -- [Spring Data Demo](https://www.arangodb.com/tutorials/spring-data/): - Basic Spring Data Usage with a Game of Thrones dataset - -Tutorial | Language | Driver ----------|----------|------- -[Node.js in 10 Minutes](https://www.arangodb.com/tutorials/tutorial-node-js/) | JavaScript | [ArangoJS](https://github.com/arangodb/arangojs) -[Java in 10 Minutes](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/) | Java | [ArangoDB-Java-Driver](https://github.com/arangodb/arangodb-java-driver) -[PHP in 10 Minutes](https://www.arangodb.com/tutorials/tutorial-php/) | PHP | [ArangoDB-PHP](https://github.com/arangodb/arangodb-php) -[Python in 10 Minutes](https://www.arangodb.com/tutorials/tutorial-python/) | Python | [pyArango](https://github.com/tariqdaouda/pyArango) diff --git a/Documentation/Books/Manual/Tutorials/Starter/README.md b/Documentation/Books/Manual/Tutorials/Starter/README.md deleted file mode 100644 index bd7becc0aa90..000000000000 --- a/Documentation/Books/Manual/Tutorials/Starter/README.md +++ /dev/null @@ -1,327 +0,0 @@ - -# Starting an ArangoDB cluster or database the easy way - -Starting an ArangoDB cluster involves starting various servers with -different roles (agents, dbservers & coordinators). - -The ArangoDB Starter is designed to make it easy to start and -maintain an ArangoDB cluster or single server database. - -Besides starting and maintaining ArangoDB deployments, the starter also provides -various commands to create TLS certificates & JWT token secrets to secure your -ArangoDB deployment. - -## Installation - -The ArangoDB starter (`arangodb`) comes with all current distributions of ArangoDB. - -If you want a specific version, download the precompiled binary via the -[GitHub releases page](https://github.com/arangodb-helper/arangodb/releases). - -## Starting a cluster - -An ArangoDB cluster typically involves 3 machines. -ArangoDB must be installed on all of them. 
- -Then start the ArangoDB starter of all 3 machines like this: - -On host A: - -```bash -arangodb -``` - -This will use port 8528 to wait for colleagues (3 are needed for a -resilient agency). On host B (can be the same as A): - -```bash -arangodb --starter.join A -``` - -This will contact A on port 8528 and register. On host C (can be same -as A or B): - -```bash -arangodb --starter.join A -``` - -This will contact A on port 8528 and register. - -From the moment on when 3 have joined, each will fire up an agent, a -coordinator and a dbserver and the cluster is up. Ports are shown on -the console, the starter uses the next few ports above the starter -port. That is, if one uses port 8528 for the starter, the coordinator -will use 8529 (=8528+1), the dbserver 8530 (=8528+2), and the agent 8531 -(=8528+3). You can change the default starter port with the `--starter.port` -[option](../../Programs/Starter/Options.md). - -Additional servers can be added in the same way. - -If two or more of the `arangodb` instances run on the same machine, -one has to use the `--starter.data-dir` option to let each use a different -directory. - -The `arangodb` program will find the ArangoDB executable (`arangod`) and the -other installation files automatically. If this fails, use the -`--server.arangod` and `--server.js-dir` options described below. - -## Running in Docker - -You can run `arangodb` using our ready made docker container. - -When using `arangodb` in a Docker container it will also run all -servers in a docker using the `arangodb/arangodb:latest` docker image. -If you wish to run a specific docker image for the servers, specify it using -the `--docker.image` argument. - -When running in docker it is important to care about the volume mappings on -the container. Typically you will start the executable in docker with the following -commands. - -```bash -export IP= -docker volume create arangodb1 -docker run -it --name=adb1 --rm -p 8528:8528 \ - -v arangodb1:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP -``` - - -The executable will show the commands needed to run the other instances. - -Note that the commands above create a docker volume. If you're running on Linux -it is also possible to use a host mapped volume. Make sure to map it -on `/data`. - -**TLS verified Docker services** - -Oftentimes, one needs to harden Docker services using client certificate -and TLS verification. The Docker API allows subsequently only -certified access. As the ArangoDB starter starts the ArangoDB cluster -instances using this Docker API, it is mandatory that the ArangoDB -starter is deployed with the proper certificates handed to it, so that -the above command is modified as follows: - -```bash -export IP= -export DOCKER_CERT_PATH=/path/to/certificate -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v $DOCKER_CERT_PATH:$DOCKER_CERT_PATH - -e DOCKER_TLS_VERIFY=1 - -e DOCKER_CERT_PATH=$DOCKER_CERT_PATH - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.join=A,B,C -``` - -Note that the enviroment variables `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` -as well as the additional mountpoint containing the certificate have been added above. -directory. The assignment of `DOCKER_CERT_PATH` is optional, in which case it -is mandatory that the certificates are stored in `$HOME/.docker`. 
So -the command would then be as follows - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /path/to/cert:/root/.docker \ - -e DOCKER_TLS_VERIFY=1 \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.join=A,B,C -``` - - -The TLS verification above applies equally to all below deployment modes. - -## Using multiple join arguments - -It is allowed to use multiple `--starter.join` arguments. -This eases scripting. For example: - -On host A: - -```bash -arangodb --starter.join A,B,C -``` - -On host B: - -```bash -arangodb --starter.join A,B,C -``` - -On host C: - -```bash -arangodb --starter.join A,B,C -``` - -This starts a cluster where the starter on host A is chosen to be master during the bootstrap phase. - -Note: `arangodb --starter.join A,B,C` is equal to `arangodb --starter.join A --starter.join B --starter.join C`. - -During the bootstrap phase of the cluster, the starters will all choose the "master" starter -based on list of given `starter.join` arguments. - -The "master" starter is chosen as follows: - -- If there are no `starter.join` arguments, the starter becomes a master. -- If there are multiple `starter.join` arguments, these arguments are sorted. If a starter is the first - in this sorted list, it becomes a starter. -- In all other cases, the starter becomes a slave. - -Note: Once the bootstrap phase is over (all arangod servers have started and are running), the bootstrap -phase ends and the starters use the Arango agency to elect a master for the runtime phase. - -## Starting a local test cluster - -If you want to start a local cluster quickly, use the `--starter.local` flag. -It will start all servers within the context of a single starter process. - -```bash -arangodb --starter.local -``` - -Using the starter this way does not provide resilience and high availability of your cluster! - -Note: When you restart the starter, it remembers the original `--starter.local` flag. - -## Starting a cluster with datacenter to datacenter synchronization - -{% hint 'info' %} -This feature is only available in the -[**Enterprise Edition**](https://www.arangodb.com/why-arangodb/arangodb-enterprise/) -{% endhint %} - -Datacenter to datacenter replication (DC2DC) requires a normal ArangoDB cluster in both data centers -and one or more (`arangosync`) syncmasters & syncworkers in both data centers. -The starter enables you to run these syncmasters & syncworkers in combination with your normal -cluster. - -To run a starter with DC2DC support you add the following arguments to the starters command line: - -```bash ---auth.jwt-secret= ---starter.address= ---starter.sync ---server.storage-engine=rocksdb ---sync.master.jwt-secret= ---sync.server.keyfile= ---sync.server.client-cafile= -``` - -Consult `arangosync` documentation for instructions how to create all certificates & keyfiles. - -## Starting a single server - -If you want to start a single database server, use `--starter.mode=single`. - -```bash -arangodb --starter.mode=single -``` - -## Starting a single server in Docker - -If you want to start a single database server running in a docker container, -use the normal docker arguments, combined with `--starter.mode=single`. 
- -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=single -``` - -## Starting a resilient single server pair - -If you want to start a resilient single database server, use `--starter.mode=activefailover`. -In this mode a 3 machine agency is started and 2 single servers that perform -asynchronous replication an failover if needed. - -```bash -arangodb --starter.mode=activefailover --starter.join A,B,C -``` - -Run this on machine A, B & C. - -The starter will decide on which 2 machines to run a single server instance. -To override this decision (only valid while bootstrapping), add a -`--cluster.start-single=false` to the machine where the single server -instance should NOT be scheduled. - -## Starting a resilient single server pair in Docker - -If you want to start a resilient single database server running in docker containers, -use the normal docker arguments, combined with `--starter.mode=activefailover`. - -```bash -export IP= -docker volume create arangodb -docker run -it --name=adb --rm -p 8528:8528 \ - -v arangodb:/data \ - -v /var/run/docker.sock:/var/run/docker.sock \ - arangodb/arangodb-starter \ - --starter.address=$IP \ - --starter.mode=activefailover \ - --starter.join=A,B,C -``` - -Run this on machine A, B & C. - -The starter will decide on which 2 machines to run a single server instance. -To override this decision (only valid while bootstrapping), add a -`--cluster.start-single=false` to the machine where the single server -instance should NOT be scheduled. - -## Starting a local test resilient single sever pair - -If you want to start a local resilient server pair quickly, use the `--starter.local` flag. -It will start all servers within the context of a single starter process. - -```bash -arangodb --starter.local --starter.mode=activefailover -``` - -Note: When you restart the started, it remembers the original `--starter.local` flag. - -## Starting & stopping in detached mode - -If you want the starter to detach and run as a background process, use the `start` -command. This is typically used by developers running tests only. - -```bash -arangodb start --starter.local=true [--starter.wait] -``` - -This command will make the starter run another starter process in the background -(that starts all ArangoDB servers), wait for it's HTTP API to be available and -then exit. The starter that was started in the background will keep running until you stop it. - -The `--starter.wait` option makes the `start` command wait until all ArangoDB server -are really up, before ending the master process. - -To stop a starter use this command. - -```bash -arangodb stop -``` - -Make sure to match the arguments given to start the starter (`--starter.port` & `--ssl.*`). - -## More information - -- [Options](../../Programs/Starter/Options.md) contains a list of all commandline options supported by the starter. -- [Security](../../Programs/Starter/Security.md) contains instructions of how to create certificates & tokens needed - to secure an ArangoDB deployment. 
diff --git a/Documentation/Books/Manual/Uninstallation/README.md b/Documentation/Books/Manual/Uninstallation/README.md deleted file mode 100644 index a5e2e17278a5..000000000000 --- a/Documentation/Books/Manual/Uninstallation/README.md +++ /dev/null @@ -1,31 +0,0 @@ -Uninstallation -============== - -Uninstallation depends on the method used to install ArangoDB, and on the -operating system in use, and typically consists of the following high-level steps: - -**If an installation package is used:** - -- Uninstallation of the ArangoDB package, which may or may not remove the - database directory and the configuration files in addition to the ArangoDB - _binaries_, depending on how the uninstallation process is started and on - the operating system in use. -- Optional removal of the leftover files (database directory and/or - configuration files) - -**If a _tar.gz_ or _.zip_ archive is used:** - -- Removal of the files and folders that were unpacked from the archive. - A data directory might exist in a subdirectory of the root folder. -- Optional removal of the leftover files (database directory and/or - configuration files) stored in a different place - -{% hint 'info' %} -If the ArangoDB _Starter_ was used to deploy ArangoDB, there will be an -additional manual step: - -- Removal of the data directory created by the _Starter_ - (the path used for the option `--starter.data-dir`). - This is different and in addition to the removal of the data directory - that is created for the _Single Instance_ by the installation package. -{% endhint %} diff --git a/Documentation/Books/Manual/Upgrading/CommunityToEnterprise.md b/Documentation/Books/Manual/Upgrading/CommunityToEnterprise.md deleted file mode 100644 index 58a05595fcfb..000000000000 --- a/Documentation/Books/Manual/Upgrading/CommunityToEnterprise.md +++ /dev/null @@ -1,57 +0,0 @@ -Community to Enterprise Upgrade Procedure -========================================= - -{% hint 'warning' %} -While migrating from the Community to the Enterprise Edition is supported, -installing directly the Enterprise package over the Community package is **not** -supported. Please see below for the correct migration procedure. -{% endhint %} - -{% hint 'danger' %} -Migrating from Enterprise to Community Edition is, in general, **not** supported. This -is because the Community Edition does not include some features, such as -[SmartGraphs](../Graphs/SmartGraphs/README.md) that, if used while the database -was running under the Enterprise Edition, do not make easily possible the -conversion of some database structures. -{% endhint %} - -Upgrading from the Community to the Enterprise Edition requires unistallation of -the Community package (can be done in a way that the database data are preserved) -and installation of the Enterprise package. The upgrade can be done in a -[_logical_](#procedure-for-a-logical-upgrade) or -[_in-place_](#procedure-for-an-in-place-upgrade) way. Please refer to the -[Upgrade Methods](GeneralInfo/README.md#upgrade-methods) section for a general -description of the two methods. Refer to the sections below for a detailed -procedure. - -Procedure for a _Logical_ Upgrade ---------------------------------- - -1. Use the tool [_arangodump_](../Programs/Arangodump/README.md) to take a backup - of your data stored by your Community Edition installation -2. Uninstall the ArangoDB Community Edition package -3. Install the ArangoDB Enterprise Edition package - (and start your _Single Instance_, _Active Failover_ or _Cluster_) -4. 
Restore the backup using the tool [_arangorestore_](../Programs/Arangorestore/README.md). - -Procedure for an _In-Place_ Upgrade ------------------------------------ - -1. Shutdown ArangoDB and make a copy of your data directory (e.g., in Linux, by - using the _cp_ command). If you are using a setup that involves several _arangod_ processes - (e.g. _Active Failover_ or _Cluster_) please make sure all _arangod_ processes - are stopped and all the data directories in use are copied in a safe location -2. Uninstall the ArangoDB Community Edition package (make sure this is done in a way that - your database is kept on your disk, e.g. on _Debian_ systems do **not** use the - _purge_ option of _dpkg_ or, on Windows, do **not** check the "_Delete databases with - unistallation?_" option) -3. Install the ArangoDB Enterprise Edition package -4. If you are moving from version A to version B, where B > A, start _arangod_ on - your data directory with the option `--database.auto-upgrade` (in addition to - any other options you are currently using). The server will stop after a while - (check the log file of _arangod_ as it should contain relevant information about - the upgrade). If you are using a setup that involves several _arangod_ processes - (e.g. _Active Failover_ or _Cluster_) this step has to be repeated for all _arangod_ - processes -5. Start ArangoDB Enterprise Edition - (in the same way you were starting ArangoDB Community Edition) diff --git a/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md b/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md deleted file mode 100644 index 20446bba585c..000000000000 --- a/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md +++ /dev/null @@ -1,66 +0,0 @@ -General Upgrade Information -=========================== - -Upgrade Methods ---------------- - -There are two main ways to upgrade ArangoDB: - -- _In-Place_ upgrade: when the installed ArangoDB package is replaced with the new one, and - the new ArangoDB binary is started on the existing data directory. -- _Logical_ upgrade: when the data is exported from the old ArangoDB version, - using [_arangodump_ ](..\..\Programs\Arangodump\README.md) and then restored in - the new ArangoDB version using [_arangorestore_ ](..\..\Programs\Arangorestore\README.md). - Depending on the size of your database, this strategy can be more time consuming, - but needed in some circumstances. - -Before the Upgrade ------------------- - -Before upgrading, it is recommended to: - -- Check the [CHANGELOG](../../ReleaseNotes/README.md#changelogs) and the - [list of incompatible changes](../../ReleaseNotes/README.md#incompatible-changes) - for API or other changes in the new version of ArangoDB, and make sure your applications - can deal with them. -- As an extra precaution, and as a requirement if you want to [downgrade](../../Downgrading/README.md), - you might want to: - - Take a backup of the old ArangoDB database, using [Arangodump](../../Programs/Arangodump/README.md), - as well as - - Copy the entire "old" data directory to a safe place, after stopping the ArangoDB Server - running on it (if you are running an Active Failover, or a Cluster, you will need to take - a copy of their data directories, from all involved machines, after stopping all the running - ArangoDB processes). - -Upgrade Paths -------------- - -- It is always possible to upgrade between hot-fixes of the same GA release, i.e - from X.Y.w to X.Y.z, where z>w. 
- - Examples: - - Upgrading from 3.4.0 to 3.4.1 or (directly to) 3.4.2 is supported. - - Upgrading from 3.3.7 to 3.3.8 or (directly to) 3.3.11 is supported. - - Upgrading from 3.2.12 to 3.2.13 or (directly to) 3.2.15 is supported. -- It possible to upgrade between two different consecutive GA releases, but it is - not officially supported to upgrade if the two GA releases are not consecutive - (in this case, you first have to upgrade to all intermediate releases). - - Examples: - - Upgrading from 3.3 to 3.4 is supported. - - Upgrading from 3.2 to 3.3 is supported. - - Upgrading from 3.2 to 3.4 directly is not officially supported: the officially - supported upgrade path in this case is 3.2 to 3.3, and then 3.3 to 3.4. - - **Important:** before upgrading between two consecutive GA releases it is highly recommended - to first upgrade the previous GA release to its latest hot-fix. - - Examples: - - To upgrade from 3.2 to 3.3, first upgrade your 3.2 installation to 3.2.latest. - - To upgrade from 3.3 to 3.4, first upgrade your 3.3 installation to 3.3.latest. - -### Additional Notes Regarding Rolling Upgrades - -In addition to the paragraph above, rolling upgrades via the tool _Starter_ are supported, -as documented in the _Section_ [Upgrading Starter Deployments](../Starter/README.md), -with the following limitations: - -- Rolling upgrades between 3.3 and 3.4 are not supported before 3.3.20 and 3.4.0. -- Rolling upgrades between 3.2 and 3.3 are not supported before 3.2.15 and 3.3.9. - diff --git a/Documentation/Books/Manual/Upgrading/Kubernetes/README.md b/Documentation/Books/Manual/Upgrading/Kubernetes/README.md deleted file mode 100644 index 639400fa2c5a..000000000000 --- a/Documentation/Books/Manual/Upgrading/Kubernetes/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Upgrading Kubernetes Deployments -================================ - -Please refer to the [Upgrading Kubernetes](../../Deployment/Kubernetes/Upgrading.md) -page in the Deployment section. diff --git a/Documentation/Books/Manual/Upgrading/Manually/ActiveFailover.md b/Documentation/Books/Manual/Upgrading/Manually/ActiveFailover.md deleted file mode 100644 index c6ecc849122b..000000000000 --- a/Documentation/Books/Manual/Upgrading/Manually/ActiveFailover.md +++ /dev/null @@ -1,208 +0,0 @@ -Manually Upgrading an _Active Failover_ Deployment -========================================= - -This page will guide you through the process of a manual upgrade of an [_Active Failover_](../../Architecture/DeploymentModes/ActiveFailover/README.md) -setup. The different nodes can be upgraded one at a time without -incurring a _prolonged_ downtime of the entire system. The downtimes of the individual nodes -should also stay fairly low. - -The manual upgrade procedure described in this section can be used to upgrade -to a new hotfix version, or to perform an upgrade to a new minor version of ArangoDB. -Please refer to the [Upgrade Paths](../GeneralInfo/README.md#upgrade-paths) section -for detailed information. - -Preparations ------------- - -The ArangoDB installation packages (e.g. for Debian or Ubuntu) set up a -convenient standalone instance of `arangod`. During installation, this instance's -database will be upgraded (see [`--database.auto-upgrade`](../../Programs/Arangod/Database.md#auto-upgrade)) -and the service will be (re)started. - -You have to make sure that your _Active Failover_ deployment is independent of this -standalone instance. 
Specifically, make sure that the database directory as -well as the socket used by the standalone instance provided by the package are -separate from the ones in your _Active Failover_ configuration. Also, that you haven't -modified the init script or systemd unit file for the standalone instance in a way -that it would start or stop your _Active Failover_ instance instead. - -### Install the new ArangoDB version binary - -The first step is to install the new ArangoDB package. - -**Note:** you do not have to stop the _Active Failover_ (_arangod_) processes before upgrading it. - -For example, if you want to upgrade to `3.3.16` on Debian or Ubuntu, either call - -``` -$ apt install arangodb=3.3.16-1 -``` - -(`apt-get` on older versions) if you have added the ArangoDB repository. Or -install a specific package using - -``` -$ dpkg -i arangodb3-3.3.16-1_amd64.deb -``` - -after you have downloaded the corresponding file from https://download.arangodb.com/. - - -#### Stop the Standalone Instance - -As the package will automatically start the standalone instance, you might want to -stop that instance now, as otherwise it can create some confusion later. As you are -starting the _Active Failover_ processes manually -you will not need the automatically installed and started standalone instance, -and you should hence stop it via: - -``` -$ service arangodb3 stop -``` - -Also, you might want to remove the standalone instance from the default -_runlevels_ to prevent it from starting on the next reboots of your machine. How this -is done depends on your distribution and _init_ system. For example, on older Debian -and Ubuntu systems using a SystemV-compatible _init_, you can use: - -``` -$ update-rc.d -f arangodb3 remove -``` - -Set supervision into maintenance mode -------------------------------------- - -**Important**: Supervision maintenance mode is supported from ArangoDB versions -3.3.8/3.2.14 or higher. - -You have two main choices when performing an upgrade of the _Active Failover_ setup: - -- Upgrade while incurring a leader-to-follower switch (with reduced downtime) -- An upgrade with no leader-to-follower switch. - -Turning the maintenance mode _on_ will enable the latter case. You might have a short -downtime during the _leader_ upgrade, but there will be no potential loss of _acknowledged_ operations. - -To enable the maintenance mode means to essentially disable the Agency supervision for a limited amount -of time during the upgrade procedure. The following API calls will -activate and deactivate the maintenance mode of the supervision job. You might use _curl_ to send the API calls. -The following examples assume there is an _Active Failover_ node running on `localhost` on port 7002. - -### Activate Maintenance mode - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"on"'` - -For example: -``` -curl -u "root:" http://localhost:7002/_admin/cluster/maintenance -XPUT -d'"on"' - -{"error":false,"warning":"Cluster supervision deactivated. -It will be reactivated automatically in 60 minutes unless this call is repeated until then."} -``` -**Note:** In case the manual upgrade takes longer than 60 minutes, the API call has to be resent. - - -### Deactivate Maintenance mode - -The _cluster_ supervision resumes automatically 60 minutes after disabling it. 
-It can be manually reactivated earlier at any point using the following API call: - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"off"'` - -For example: -``` -curl -u "root:" http://localhost:7002/_admin/cluster/maintenance -XPUT -d'"off"' - -{"error":false,"warning":"Cluster supervision reactivated."} -``` - -Upgrade the _Active Failover_ processes ---------------------------------------- - -Now all the _Active Failover_ (_Agents_, _Single-Server_) processes (_arangod_) have to be -upgraded on each node. - -**Note:** Please read the section regarding the maintenance mode above. - -In order to stop an _arangod_ process we will need to use a command like `kill -15`: - -``` -kill -15 -``` - -The _pid_ associated to your _Active Failover setup_ can be checked using a command like _ps_: - - -``` -ps -C arangod -fww -``` - -The output of the command above does not only show the process ids of all _arangod_ -processes but also the used commands, which is useful for the following -restarts of all _arangod_ processes. - -The output below is from a test machine where three _Agents_ and two _Single-Servers_ -were running locally. In a more production-like scenario, you will find only one instance of each -type running per machine: - -``` -ps -C arangod -fww -UID PID PPID C STIME TTY TIME CMD -max 29075 8072 0 13:50 pts/2 00:00:42 arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address=tcp://127.0.0.1:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a1 --javascript.app-path /tmp --database.directory agent1 -max 29208 8072 2 13:51 pts/2 00:02:08 arangod --server.endpoint tcp://0.0.0.0:5002 --agency.my-address=tcp://127.0.0.1:5002 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a2 --javascript.app-path /tmp --database.directory agent2 -max 29329 16224 0 13:51 pts/3 00:00:42 arangod --server.endpoint tcp://0.0.0.0:5003 --agency.my-address=tcp://127.0.0.1:5003 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a3 --javascript.app-path /tmp --database.directory agent3 -max 29824 16224 1 13:55 pts/3 00:01:53 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:7001 --cluster.my-address tcp://127.0.0.1:7001 --cluster.my-role SINGLE --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file c1 --javascript.app-path /tmp --database.directory single1 -max 29938 16224 2 13:56 pts/3 00:02:13 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:7002 --cluster.my-address tcp://127.0.0.1:7002 --cluster.my-role SINGLE --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file c2 --javascript.app-path /tmp --database.directory single2 -``` - -**Note:** The start commands of _Agent_ and _Single Server_ are required for restarting the processes later. - -The recommended procedure for upgrading an _Active Failover_ setup is to stop, upgrade -and restart the _arangod_ instances one by one on all participating servers, -starting first with all _Agent_ instances, and then following with the _Active Failover_ -instances themselves. 
When upgrading the _Active Failover_ instances, the followers should -be upgraded first. - -To figure out the node containing the followers, you can consult the cluster endpoints API: -``` -curl http://:7002/_api/cluster/endpoints -``` -This will yield a list of endpoints, the _first_ of which is always the leader node. - - -### Stopping, upgrading and restarting an instance - -To stop an instance, the currently running process has to be identified using the `ps` -command above. - -Let's assume we are about to upgrade an _Agent_ instance, so we have to look in the `ps` -output for an agent instance first, and note its process id (pid) and start command. - -The process can then be stopped using the following command: - -``` -kill -15 -``` - -The instance then has to be upgraded using the same command that was used before (in the `ps` output), -but with the additional option: - -``` ---database.auto-upgrade=true -``` - -After the upgrade procedure has finished successfully, the instance will remain stopped. -So it has to be restarted using the command from the `ps` output before -(this time without the `--database.auto-upgrade` option). - - -Once an _Agent_ has been upgraded and restarted successfully, repeat the procedure for the -other _Agent_ instances in the setup and then repeat the procedure for the _Active Failover_ -instances, starting with the followers. - -### Final words - -The _Agency_ supervision then needs to be reactivated by issuing the following API call -to the leader: - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"off"'` diff --git a/Documentation/Books/Manual/Upgrading/Manually/Cluster.md b/Documentation/Books/Manual/Upgrading/Manually/Cluster.md deleted file mode 100644 index 245a3093e983..000000000000 --- a/Documentation/Books/Manual/Upgrading/Manually/Cluster.md +++ /dev/null @@ -1,233 +0,0 @@ -Manually Upgrading a _Cluster_ Deployment -========================================= - -This page will guide you through the process of a manual upgrade of a [_cluster_](../../Architecture/DeploymentModes/Cluster/README.md) -setup. The different nodes in a _cluster_ can be upgraded one at a time without -incurring downtime of the _cluster_ and with only very short downtimes of the single nodes. - -The manual upgrade procedure described in this _Section_ can be used to upgrade -to a new hotfix, or to perform an upgrade to a new minor version of ArangoDB. -Please refer to the [Upgrade Paths](../GeneralInfo/README.md#upgrade-paths) section -for detailed information. - -Preparations ------------- - -The ArangoDB installation packages (e.g. for Debian or Ubuntu) set up a -convenient standalone instance of `arangod`. During installation, this instance's -database will be upgraded (see [`--database.auto-upgrade`](../../Programs/Arangod/Database.md#auto-upgrade)) -and the service will be (re)started. - -You have to make sure that your _cluster_ deployment is independent of this -standalone instance. Specifically, make sure that the database directory as -well as the socket used by the standalone instance provided by the package are -separate from the ones in your _cluster_ configuration. Also make sure that you haven't -modified the init script or systemd unit file for the standalone instance in a way -that it would start or stop your _cluster_ instance instead. - -You can read about the details on how to deploy your _cluster_ independently of the -standalone instance in the [_cluster_ deployment preliminary](../../Deployment/Cluster/PreliminaryInformation.md).
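Before proceeding, it can be worth double-checking this separation. A possible sanity check (a sketch only; the configuration file path shown is the usual Debian/Ubuntu package default and may differ on your system):

```
# Data directory configured for the packaged standalone instance
grep -i directory /etc/arangodb3/arangod.conf

# Data directories actually used by the running cluster processes
ps -C arangod -fww | grep -o -- "--database.directory [^ ]*"
```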
- -In the following, we assume that you don't use the standalone instance from the -package but only a manually started _cluster_ instance, and we will move the -standalone instance out of the way if necessary so that you have to make as few -changes as possible to the running _cluster_. - -### Install the new ArangoDB version binary - -The first step is to install the new ArangoDB package. - -**Note:** you do not have to stop the _cluster_ (_arangod_) processes before upgrading it. - -For example, if you want to upgrade to `3.3.9-1` on Debian or Ubuntu, either call - -``` -$ apt install arangodb=3.3.9 -``` - -(`apt-get` on older versions) if you have added the ArangoDB repository. Or -install a specific package using - -``` -$ dpkg -i arangodb3-3.3.9-1_amd64.deb -``` - -after you have downloaded the corresponding file from https://download.arangodb.com/. - -#### Stop the Standalone Instance - -As the package will automatically start the standalone instance, you might want to -stop it now, as otherwise this standalone instance that is started on your machine -can create some confusion later. As you are starting the _cluster_ processes manually -you do not need this standalone instance, and you can hence stop it: - -``` -$ service arangodb3 stop -``` - -Also, you might want to remove the standalone instance from the default -_runlevels_ to prevent it from starting on the next reboot of your machine. How this -is done depends on your distribution and _init_ system. For example, on older Debian -and Ubuntu systems using a SystemV-compatible _init_, you can use: - -``` -$ update-rc.d -f arangodb3 remove -``` - -Set supervision in maintenance mode ----------------------------------- - -**Important**: Maintenance mode is supported from versions 3.3.8/3.2.14. - -It is required to disable _cluster_ supervision in order to upgrade your _cluster_. The -following API calls will activate and deactivate the Maintenance mode of the Supervision job. -You might use _curl_ to send the API calls. - -### Activate Maintenance mode - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"on"'` - -For example: -``` -curl http://localhost:7002/_admin/cluster/maintenance -XPUT -d'"on"' - -{"error":false,"warning":"Cluster supervision deactivated. -It will be reactivated automatically in 60 minutes unless this call is repeated until then."} -``` -**Note:** In case the manual upgrade takes longer than 60 minutes, the API call has to be re-sent. - -### Deactivate Maintenance mode - -The _cluster_ supervision reactivates 60 minutes after disabling it. -It can be manually reactivated earlier by the following API call: - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"off"'` - -For example: -``` -curl http://localhost:7002/_admin/cluster/maintenance -XPUT -d'"off"' - -{"error":false,"warning":"Cluster supervision reactivated."} -``` - -Upgrade the _cluster_ processes ------------------------------- - -Now all the _cluster_ (_Agents_, _DBServers_ and _Coordinators_) processes (_arangod_) have to be -upgraded on each node. - -**Note:** The maintenance mode has to be activated. - -In order to stop the _arangod_ processes, we will need to use a command like `kill -15`: - -``` -kill -15 -``` - -The _pid_ associated with your _cluster_ can be checked using a command like _ps_: - - -``` -ps -C arangod -fww -``` - -The output of the command above does not only show the PIDs of all _arangod_ -processes but also the used commands, which can be useful for the following -restart of all _arangod_ processes.
- -The output below is from a test machine where three _Agents_, two _DBServers_ -and two _Coordinators_ are running locally. In a more production-like scenario, -you will find only one instance of each one running: - -``` -ps -C arangod -fww -UID PID PPID C STIME TTY TIME CMD -max 29075 8072 0 13:50 pts/2 00:00:42 arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address=tcp://127.0.0.1:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a1 --javascript.app-path /tmp --database.directory agent1 -max 29208 8072 2 13:51 pts/2 00:02:08 arangod --server.endpoint tcp://0.0.0.0:5002 --agency.my-address=tcp://127.0.0.1:5002 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a2 --javascript.app-path /tmp --database.directory agent2 -max 29329 16224 0 13:51 pts/3 00:00:42 arangod --server.endpoint tcp://0.0.0.0:5003 --agency.my-address=tcp://127.0.0.1:5003 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true --log.file a3 --javascript.app-path /tmp --database.directory agent3 -max 29461 16224 1 13:53 pts/3 00:01:11 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:6001 --cluster.my-address tcp://127.0.0.1:6001 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file db1 --javascript.app-path /tmp --database.directory dbserver1 -max 29596 8072 0 13:54 pts/2 00:00:56 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:6002 --cluster.my-address tcp://127.0.0.1:6002 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file db2 --javascript.app-path /tmp --database.directory dbserver2 -max 29824 16224 1 13:55 pts/3 00:01:53 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:7001 --cluster.my-address tcp://127.0.0.1:7001 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file c1 --javascript.app-path /tmp --database.directory coordinator1 -max 29938 16224 2 13:56 pts/3 00:02:13 arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:7002 --cluster.my-address tcp://127.0.0.1:7002 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 --log.file c2 --javascript.app-path /tmp --database.directory coordinator2 - -``` - -### Upgrade a _cluster_ node - -The following procedure is upgrading _Agent_, _DBServer_ and _Coordinator_ on one node. - -**Note:** The starting commands of _Agent_, _DBServer_ and _Coordinator_ have to be reused. - -#### Stop the _Agent_ - -``` -kill -15 -``` - -#### Upgrade the _Agent_ - -The _arangod_ process of the _Agent_ has to be upgraded using the same command that has -been used before with the additional option: - -``` ---database.auto-upgrade=true -``` - -The _Agent_ will stop automatically after the upgrade. 
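For illustration, upgrading the first _Agent_ from the example `ps` output above would look as follows (a sketch; on your system, reuse exactly the command that `ps` reported for the instance you are upgrading):

```
arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address=tcp://127.0.0.1:5001 \
  --server.authentication false --agency.activate true --agency.size 3 \
  --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true \
  --log.file a1 --javascript.app-path /tmp --database.directory agent1 \
  --database.auto-upgrade=true
```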
- -#### Restart the _Agent_ - -The _arangod_ process of the _Agent_ has to be restarted using the same command that has -been used before (without the additional option). - -#### Stop the _DBServer_ - -``` -kill -15 -``` - -#### Upgrade the _DBServer_ - -The _arangod_ process of the _DBServer_ has to be upgraded using the same command that has -been used before with the additional option: - -``` ---database.auto-upgrade=true -``` - -The _DBServer_ will stop automatically after the upgrade. - -#### Restart the _DBServer_ - -The _arangod_ process of the _DBServer_ has to be restarted using the same command that has -been used before (without the additional option). - -#### Stop the _Coordinator_ - -``` -kill -15 -``` - -#### Upgrade the _Coordinator_ - -The _arangod_ process of the _Coordinator_ has to be upgraded using the same command that has -been used before with the additional option: - -``` ---database.auto-upgrade=true -``` - -The _Coordinator_ will stop automatically after the upgrade. - -#### Restart the _Coordinator_ - -The _arangod_ process of the _Coordinator_ has to be restarted using the same command that has -been used before (without the additional option). - -After repeating this process on every node all _Agents_, _DBServers_ and _Coordinators_ are upgraded and the manual upgrade -has successfully finished. - -The _cluster_ supervision is reactivated by the API call: - -`curl -u username:password /_admin/cluster/maintenance -XPUT -d'"off"'` diff --git a/Documentation/Books/Manual/Upgrading/Manually/README.md b/Documentation/Books/Manual/Upgrading/Manually/README.md deleted file mode 100644 index 91cd15254071..000000000000 --- a/Documentation/Books/Manual/Upgrading/Manually/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Upgrading Manual Deployments -============================ - -It is possible to upgrade ArangoDB setups manually without the help of any -of our deployment tools. Be aware that you need to be careful when following the steps, -otherwise you may risk downtime or losing data (depending on the specific deployment). - -Available Guides: - -- Manually Upgrade an [Active Failover Deployment](ActiveFailover.md) -- Manually Upgrade a [Cluster Deployment](Cluster.md) diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Linux.md b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Linux.md deleted file mode 100644 index db4867bda564..000000000000 --- a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Linux.md +++ /dev/null @@ -1,64 +0,0 @@ -Upgrading on Linux -================== - -By installing the new ArangoDB package the standalone instance is automatically -upgraded. In addition to the ArangoDB daemon (_arangod_), also the ArangoDB -_Starter_ binary is updated. As a result, the procedure described in this _Section_ -is a first step to upgrade more complex deployments such as [Cluster](../../Architecture/DeploymentModes/Cluster/README.md) -or [Active Failover](../../Architecture/DeploymentModes/ActiveFailover/README.md). 
- -Upgrading via APT (Ubuntu) --------------------------- - -First add the repository key to _apt_: - -``` -curl -OL https://download.arangodb.com/arangodb33/xUbuntu_17.04/Release.key -sudo apt-key add - < Release.key -``` - -Use **apt-get** to install arangodb: - -``` -echo 'deb https://download.arangodb.com/arangodb33/xUbuntu_17.04/ /' | sudo tee /etc/apt/sources.list.d/arangodb.list -sudo apt-get install apt-transport-https -sudo apt-get update -sudo apt-get install arangodb3=3.3.10 -``` - -**Note**: The latest available version can be found in the [download section](https://www.arangodb.com/download-major/ubuntu/). - -Upgrading via DPKG (Ubuntu) ---------------------------- - -Download the corresponding file from the [download section](https://download.arangodb.com/). - -Install a specific package using **dpkg**: - -``` -$ dpkg -i arangodb3-3.3.10-1_amd64.deb -``` - -Upgrading via YUM (CentOS) -------------------------- - -Use **yum** to install ArangoDB: - -``` -cd /etc/yum.repos.d/ -curl -OL https://download.arangodb.com/arangodb33/CentOS_7/arangodb.repo -yum -y install arangodb3-3.3.10 -``` - -**Note**: The latest available version can be found in the [download section](https://www.arangodb.com/download-major/centos/). - -Upgrading via RPM (CentOS) ---------------------------- - -Download the corresponding file from the [download section](https://download.arangodb.com/). - -Install a specific package using **rpm**: - -``` -$ rpm -i arangodb3-3.3.10-1.x86_64.rpm -``` diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOS.md b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOS.md deleted file mode 100644 index 8fdb3ee43448..000000000000 --- a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOS.md +++ /dev/null @@ -1,40 +0,0 @@ -Upgrading on macOS -================== - -This _Section_ describes upgrading an ArangoDB single-server installation, which -was installed via Homebrew or via the provided ArangoDB packages (*.dmg). - -Upgrading via Homebrew --------------------------- - -First update the homebrew repository: - -``` -brew update -``` - -Then use **brew** to install the latest version of arangodb: - -``` -brew upgrade arangodb -``` - -Upgrading via Package --------------------------- - -[Download](https://www.arangodb.com/download/) the latest ArangoDB macOS package and install it as usual by -mounting the `.dmg` file. Just drag and drop the `ArangoDB3-CLI` (community) or -the `ArangoDB3e-CLI` (enterprise) file into the shown `Applications` folder. -You will be asked if you want to replace the old file with the newer one. - -![MacOSUpgrade](MacOSUpgrade.png) - -Select `Replace` to install the current ArangoDB version. - -Upgrading more complex environments --------------------------- - -The procedure described in this _Section_ -is a first step to upgrade more complex deployments such as -[Cluster](../../Architecture/DeploymentModes/Cluster/README.md) -or [Active Failover](../../Architecture/DeploymentModes/ActiveFailover/README.md). 
diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOSUpgrade.png b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOSUpgrade.png deleted file mode 100644 index c44bab0ea43f..000000000000 Binary files a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/MacOSUpgrade.png and /dev/null differ diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/README.md b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/README.md deleted file mode 100644 index a8a0317bde0a..000000000000 --- a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/README.md +++ /dev/null @@ -1,6 +0,0 @@ -OS-specific Information -======================= - -- [Upgrading on Linux](Linux.md) -- [Upgrading on macOS](MacOS.md) -- [Upgrading on Windows](Windows.md) \ No newline at end of file diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Windows.md b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Windows.md deleted file mode 100644 index 4a5abdeffe8f..000000000000 --- a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/Windows.md +++ /dev/null @@ -1,115 +0,0 @@ -Upgrading on Windows -==================== - -As there are different ways to install ArangoDB on Windows, the upgrade -method depends on the installation method that was used. - -In general, you will need to: - -- Install (or unpack) the new ArangoDB binaries on the system -- Upgrade the current database (or perform a restore) -- Optionally (suggested to keep the system clean, unless there are specific - reasons not to do so): remove the old binaries from the system - -Some of the above steps may be done automatically, depending on your -specific situation. - -Upgrading via the Installer ---------------------------- - -If you have installed via the _Installer_, to upgrade: - -- Download the new _Installer_ and run it. - -The _Installer_ will ask if you want to update your current database: select - the option "_Automatically update existing ArangoDB database_" so that the database - files will be upgraded. - -![Update Option](installer_upgrade.png) - -{% hint 'info' %} -Upgrading via the Installer, when the old data is kept, will keep your -password and choice of storage engine as they are. -{% endhint %} - -- After installing the new package, you will have both packages installed. -- You can uninstall the old one manually (make a copy of your old configuration -file first). - -![Uninstall old version](both_installations.png) - -{% hint 'danger' %} -When uninstalling the old package, please make sure the option -"_Delete databases with uninstallation_" is **not** checked. -{% endhint %} - -![Delete Option](installer_delete.png) - -{% hint 'danger' %} -When upgrading, the Windows Installer does not use the old configuration file -for the installed _Single Instance_ but a new (default) one ([Issue #3773](https://github.com/arangodb/arangodb/issues/3773)).
-To use the old configuration, it is currently necessary to: -- Stop the server -- Replace the new with the old configuration file -- Restart the server -{% endhint %} - -Manual upgrade of a 'ZIP archive' installation ---------------------------------------------- - -There are two ways to upgrade a _Single Instance_ that has been started -from a _ZIP_ package: - -- In-Place upgrade -- Logical upgrade - -### In-Place upgrade - -{% hint 'info' %} This method is easier if: -- You are using a data directory which is located outside of the directory - created when extracting the _ZIP_ archive (the data directory can be set via - the server option *--database.directory*) -- You are using a configuration file which is located outside of the directory - created when extracting the _ZIP_ archive (a configuration file can be passed via - the server option *--configuration*) -{% endhint %} - -Assuming that: -- Your data directory is _directory1_ (e.g. "D:\arango\data") -- Your configuration file is _file_ (e.g. "D:\arango\conf\arangod.conf") -- Your old binaries are in _directory2_ (e.g. "C:\tools\arangodb-3.4.0") - -to perform the upgrade of a _Single Instance_: - -1. Download and extract the new _ZIP_ package into a new directory (e.g. - _directory3_ "C:\tools\arangodb-3.4.1") -2. Stop your old server -3. Start the server again (this time using the binary located in _directory3_), - passing: - - _directory1_ as *--database.directory*, - - _file_ as *--configuration* - - the option *--database.auto-upgrade* (so that the old data directory will - be upgraded) -4. When the previous step is finished, the server will stop automatically; you - can now start your server again as done in the previous step but without - passing the *--database.auto-upgrade* option -5. Optionally remove the old server package by dropping the corresponding - directory when you are confident enough that all is working fine. - -### Logical upgrade - -To perform the upgrade of a _Single Instance_: - -1. Download the new package and extract it in a different location than the - previous one -2. Stop writes to the old server (e.g. block incoming connections) -3. Take a backup of the data using _arangodump_ -4. Stop the old server -5. Optionally (depending on whether or not you modified the default configuration), - copy the old ArangoDB configuration file to the new server (or just edit - the new configuration file) -6. Start the new server (with a fresh data directory; by default it will be - inside the directory created when extracting the _ZIP_ archive) -7. Restore the backup into the new server using _arangorestore_ -8. Re-enable the writes (e.g. allow incoming connections again) -9. Optionally remove the old server package by dropping the corresponding - directory when you are confident enough that all is working fine.
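Using the example paths from the list above, steps 3 and 4 of the in-place upgrade could look roughly like this (a sketch; the exact location of `arangod.exe` inside the extracted archive may differ between releases):

```
:: Step 3: run the new binary once with --database.auto-upgrade (it stops when done)
C:\tools\arangodb-3.4.1\usr\bin\arangod.exe --configuration "D:\arango\conf\arangod.conf" --database.directory "D:\arango\data" --database.auto-upgrade

:: Step 4: start the upgraded server normally with the same binary
C:\tools\arangodb-3.4.1\usr\bin\arangod.exe --configuration "D:\arango\conf\arangod.conf" --database.directory "D:\arango\data"
```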
diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/both_installations.png b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/both_installations.png deleted file mode 100644 index 863d5f8f551d..000000000000 Binary files a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/both_installations.png and /dev/null differ diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_delete.png b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_delete.png deleted file mode 100644 index 3d1ee54e1dcd..000000000000 Binary files a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_delete.png and /dev/null differ diff --git a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_upgrade.png b/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_upgrade.png deleted file mode 100644 index 88590b290a9a..000000000000 Binary files a/Documentation/Books/Manual/Upgrading/OSSpecificInfo/installer_upgrade.png and /dev/null differ diff --git a/Documentation/Books/Manual/Upgrading/README.md b/Documentation/Books/Manual/Upgrading/README.md deleted file mode 100644 index 2d601dd66bb1..000000000000 --- a/Documentation/Books/Manual/Upgrading/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Upgrading -========= - -This _Chapter_ includes information related to the upgrade of ArangoDB. - -For downgrade instructions, please refer to the [Downgrading](../Downgrading/README.md) -chapter. - -- [General Upgrade Information](GeneralInfo/README.md) -- [Community to Enterprise Upgrade](CommunityToEnterprise.md) -- [OS-specific Information](OSSpecificInfo/README.md) -- [Upgrading _Starter_ Deployments](Starter/README.md) -- [Upgrading Manual Deployments](Manually/README.md) -- [Upgrading Kubernetes Deployments](Kubernetes/README.md) -- [Version Specific Upgrade Information](VersionSpecific/README.md) diff --git a/Documentation/Books/Manual/Upgrading/Starter/README.md b/Documentation/Books/Manual/Upgrading/Starter/README.md deleted file mode 100644 index 4adae455d709..000000000000 --- a/Documentation/Books/Manual/Upgrading/Starter/README.md +++ /dev/null @@ -1,248 +0,0 @@ - -# Upgrading _Starter_ Deployments - -Starting from versions 3.2.15 and 3.3.8, the ArangoDB [_Starter_](../../Programs/Starter/README.md) -supports a new, automated, procedure to perform upgrades, including rolling upgrades -of a [Cluster](../../Architecture/DeploymentModes/Cluster/README.md) setup. - -The upgrade procedure of the _Starter_ described in this _Section_ can be used to -upgrade to a new hotfix, or to perform an upgrade to a new minor version of ArangoDB. -Please refer to the [Upgrade Paths](../GeneralInfo/README.md#upgrade-paths) section -for detailed information. - -**Important:** - -- Rolling upgrades of Cluster setups from 3.2 to 3.3 are only supported - from versions 3.2.15 and 3.3.9. -- Rolling upgrades of Cluster setups from 3.3 to 3.4 are only supported - from versions 3.3.20 and 3.4.0. - -## Upgrade Scenarios - -The following four cases are possible: - -1. You have installed via an installation package (e.g. a `.deb` or `.rpm` package) - and you will upgrade this installation using again an installation package - (e.g. a `.deb` or `.rpm`). -2. You have installed via the `.tar.gz` distribution and you will upgrade this - installation using again a `.tar.gz` distribution. -3. You have installed via an installation package (e.g. a `.deb` or `.rpm` package) - and you will upgrade this installation using a `.tar.gz` distribution. -4. 
You have installed via the `.tar.gz` distribution and you will upgrade this - installation using an installation package (e.g. a `.deb` or `.rpm` package). - -Cases 1. and 2. are more common, though cases 3. and 4. are also possible. - -## Upgrade Procedure - -The following procedure has to be executed on every ArangoDB _Starter_ instance. -It is assumed that a _Starter_ deployment with mode `single`, `activefailover` or -`cluster` is running. - -### Install the new ArangoDB version binary - -Installing the new ArangoDB version binary also includes the latest ArangoDB _Starter_ -binary, which is necessary to perform the rolling upgrade. - -The first step is to install the new ArangoDB package. - -**Note:** you do not have to stop the _Starter_ processes before upgrading it. - -For example, if you want to upgrade to `3.3.14-1` on Debian or Ubuntu, either call - -```bash -apt install arangodb=3.3.14 -``` - -(`apt-get` on older versions) if you have added the ArangoDB repository. Or -install a specific package using - -```bash -dpkg -i arangodb3-3.3.14-1_amd64.deb -``` - -after you have downloaded the corresponding file from https://www.arangodb.com/download/. - -If you are using the `.tar.gz` distribution (only available from v3.4.0), -you can simply extract the new archive in a different -location and keep the old installation where it is. Note that -this does not launch a standalone instance, so the following section can -be skipped in this case. - -#### Stop the Standalone Instance - -As the package will automatically start the standalone instance, you might want to -stop it now, as otherwise this standalone instance that is started on your machine -can create some confusion later. As you are using the _Starter_ you do not need -this standalone instance, and you can hence stop it: - -```bash -service arangodb3 stop -``` - -Also, you might want to remove the standalone instance from the default -_runlevels_ to prevent it to start on the next reboot of your machine. How this -is done depends on your distribution and _init_ system. For example, on older Debian -and Ubuntu systems using a SystemV-compatible _init_, you can use: - -```bash -update-rc.d -f arangodb3 remove -``` - -### Stop the _Starter_ without stopping the ArangoDB Server processes - -Now all the _Starter_ (_arangodb_) processes have to be stopped. - -Please note that **no** _arangod_ processes should be stopped. - -In order to stop the _arangodb_ processes, leaving the _arangod_ processes they -have started up and running (as we want for a rolling upgrade), we will need to -use a command like `kill -9`: - -```bash -kill -9 -``` - -The _pid_ associated to your _Starter_ can be checked using a command like _ps_: - -```bash -ps -C arangodb -fww -``` - -The output of the command above does not only show the PID's of all _arangodb_ -processes but also the used commands, which can be useful for the following -restart of all _arangodb_ processes. - -The output below is from a test machine where three instances of a _Starter_ are -running locally. 
In a more production-like scenario, you will find only one instance -of _arangodb_ running: - -```bash -ps -C arangodb -fww -UID PID PPID C STIME TTY TIME CMD -max 29419 3684 0 11:46 pts/1 00:00:00 arangodb --starter.data-dir=./db1 -max 29504 3695 0 11:46 pts/2 00:00:00 arangodb --starter.data-dir=./db2 --starter.join 127.0.0.1 -max 29513 3898 0 11:46 pts/4 00:00:00 arangodb --starter.data-dir=./db3 --starter.join 127.0.0.1 -``` - -### Restart the _Starter_ - -When using a supervisor like _SystemD_, this will happens automatically. In case -the _Starter_ was initiated manually, the _arangodb_ processes have to be restarted -manually with the same command that has been used before. - -If you are using the `.tar.gz` distribution (only available from v3.4.0), -your new version of the executable might be located in a -different directory. Make sure that you now start the new _Starter_ -executable (`bin/arangodb`) in the new installation place. If you are -using a supervisor like _SystemD_, you might have to adjust the path to -the executable in the service description to the new location. Do this -before you `kill -9` the _Starter_ or else the old version will be -restarted in this case. If you forgot, simply do the `kill -9` again. - -After you have restarted the _Starter_ you will find yourself in the following -situation: - -- The _Starter_ is up and running, and it is on the new version -- The ArangoDB Server processes are up and running, and they are still on the - old version - -### Start the upgrade process of all _arangod_ & _arangosync_ servers - -Run the following command: - -```bash -arangodb upgrade --starter.endpoint= -``` - -The `--starter.endpoint` option can be set to the endpoint of any -of the starters. E.g. `http://localhost:8528`. - -**Important:** - -The command above was introduced with 3.3.14 (and 3.2.17). If you are rolling upgrade a 3.3.x version -to a version higher or equal to 3.3.14, or if you are rolling upgrade a 3.2.x version to a version higher -or equal to 3.2.17 please use the command above. - -If you are doing the rolling upgrade of a 3.3.x version to a version between 3.3.8 and 3.3.13 (included), -or if you are rolling upgrade a 3.2.x version to 3.2.15 or 3.2.16, a different command has to be used -(on all _Starters_ one by one): - -``` -curl -X POST --dump - http://localhost:8538/database-auto-upgrade -``` - -#### Deployment mode `single` - -For deployment mode `single`, the `arangodb upgrade` command will: - -- Restart the single server with an additional `--database.auto-upgrade=true` argument. - The server will perform the auto-upgrade and then stop. - After that the _Starter_ will automatically restart it with its normal arguments. - -The `arangodb upgrade` command will complete right away. -Inspect the log of the _Starter_ to know when the upgrade has finished. - -#### Deployment mode `activefailover` or `cluster` - -The _Starters_ will now perform an initial check that upgrading is possible -and when that all succeeds, create an upgrade _plan_. This _plan_ is then -executed by every _Starter_. - -The `arangodb upgrade` command will show the progress of the upgrade -and stop when the upgrade has either finished successfully or finished -with an error. - -### Uninstall old package - -{% hint 'info' %} -This step is required in the cases 2., 3. and 4. only. It is not required -in case 1., see [Upgrade Scenarios](#upgrade-scenarios) above. -{% endhint %} - -After verifying your upgraded ArangoDB system is working, you can remove -the old package. 
This can be done in different ways, depending on the case -you are: - -- Cases 2. and 4.: just remove the old directory created by the `.tar.gz` - (assumes your `--starter.data-dir` is located outside of this - directory - which is a recommended approach). -- Case 3.: just remove the old package by running the corresponding - uninstallation command (the exact command depends on whether you are - using a `.deb` or `.rmp` package and it is assumed that your - `--starter.data-dir` is located outside of the standard directories - created by the installation package - which is a recommended approach). - -## Retrying a failed upgrade - -Starting with 3.3.14 and 3.2.17, when an upgrade _plan_ (in deployment -mode `activefailover` or `cluster`) has failed, it can be retried. - -To retry, run: - -```bash -arangodb retry upgrade --starter.endpoint= -``` - -The `--starter.endpoint` option can be set to the endpoint of any -of the starters. E.g. `http://localhost:8528`. - -## Aborting an upgrade - -Starting with 3.3.14 and 3.2.17, when an upgrade _plan_ (in deployment -mode `activefailover` or `cluster`) is in progress or has failed, it can -be aborted. - -To abort, run: - -```bash -arangodb abort upgrade --starter.endpoint= -``` - -The `--starter.endpoint` option can be set to the endpoint of any -of the starters. E.g. `http://localhost:8528`. - -Note that an abort does not stop all upgrade processes immediately. -If an _arangod_ or _arangosync_ server is being upgraded when the abort -was issued, this upgrade will be finished. Remaining servers will not be -upgraded. diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/README.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/README.md deleted file mode 100644 index 9359721502de..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Version Specific Upgrade Information -==================================== - -- Upgrading to 3.x: - [3.4](Upgrading34.md), - [3.3](Upgrading33.md), - [3.2](Upgrading32.md), - [3.1](Upgrading31.md), - [3.0](Upgrading30.md) -- Upgrading to 2.x: - [2.8](Upgrading28.md), - [2.6](Upgrading26.md), - [2.5](Upgrading25.md), - [2.4](Upgrading24.md), - [2.3](Upgrading23.md), - [2.2](Upgrading22.md) diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading22.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading22.md deleted file mode 100644 index 2dbdc6dde13a..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading22.md +++ /dev/null @@ -1,118 +0,0 @@ -Upgrading to ArangoDB 2.2 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.2. - -Please note first that a database directory used with ArangoDB 2.2 -cannot be used with earlier versions (e.g. ArangoDB 2.1) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.2 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. - -The output will then look like this: - -``` -2014-07-07T22:04:53Z [18675] ERROR In database '_system': Database directory version (2.1) is lower than server version (2.2). 
-2014-07-07T22:04:53Z [18675] ERROR In database '_system': ---------------------------------------------------------------------- -2014-07-07T22:04:53Z [18675] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2014-07-07T22:04:53Z [18675] ERROR In database '_system': If this is what you wanted to do, please restart with the -2014-07-07T22:04:53Z [18675] ERROR In database '_system': --upgrade -2014-07-07T22:04:53Z [18675] ERROR In database '_system': option to upgrade the data in the database directory. -2014-07-07T22:04:53Z [18675] ERROR In database '_system': Normally you can use the control script to upgrade your database -2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb stop -2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb upgrade -2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb start -2014-07-07T22:04:53Z [18675] ERROR In database '_system': ---------------------------------------------------------------------- -2014-07-07T22:04:53Z [18675] FATAL Database version check failed for '_system'. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.2 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option: - - unix> arangod data --upgrade - -where `data` is ArangoDB's main data directory. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. - -The output should look like this: -``` -2014-07-07T22:11:30Z [18867] INFO In database '_system': starting upgrade from version 2.1 to 2.2.0 -2014-07-07T22:11:30Z [18867] INFO In database '_system': Found 19 defined task(s), 2 task(s) to run -2014-07-07T22:11:30Z [18867] INFO In database '_system': upgrade successfully finished -2014-07-07T22:11:30Z [18867] INFO database upgrade passed -``` - -Please check the output the `--upgrade` run. It may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved, you can start ArangoDB 2.2 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. 
Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. - diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading23.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading23.md deleted file mode 100644 index d94716ab1490..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading23.md +++ /dev/null @@ -1,119 +0,0 @@ -Upgrading to ArangoDB 2.3 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.3. Please be sure that you have checked the list of [changes in 2.3](../../ReleaseNotes/UpgradingChanges23.md) -before upgrading. - -Please note first that a database directory used with ArangoDB 2.3 -cannot be used with earlier versions (e.g. ArangoDB 2.2) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.3 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. - -The output will then look like this: - -``` -2014-11-03T15:48:06Z [2694] ERROR In database '_system': Database directory version (2.2) is lower than current version (20300). -2014-11-03T15:48:06Z [2694] ERROR In database '_system': ---------------------------------------------------------------------- -2014-11-03T15:48:06Z [2694] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2014-11-03T15:48:06Z [2694] ERROR In database '_system': If this is what you wanted to do, please restart with the -2014-11-03T15:48:06Z [2694] ERROR In database '_system': --upgrade -2014-11-03T15:48:06Z [2694] ERROR In database '_system': option to upgrade the data in the database directory. 
-2014-11-03T15:48:06Z [2694] ERROR In database '_system': Normally you can use the control script to upgrade your database -2014-11-03T15:48:06Z [2694] ERROR In database '_system': /etc/init.d/arangodb stop -2014-11-03T15:48:06Z [2694] ERROR In database '_system': /etc/init.d/arangodb upgrade -2014-11-03T15:48:06Z [2694] ERROR In database '_system': /etc/init.d/arangodb start -2014-11-03T15:48:06Z [2694] ERROR In database '_system': ---------------------------------------------------------------------- -2014-11-03T15:48:06Z [2694] FATAL Database '_system' needs upgrade. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.3 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option: - - unix> arangod data --upgrade - -where `data` is ArangoDB's main data directory. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. - -The output should look like this: -``` -2014-11-03T15:48:47Z [2708] INFO In database '_system': Found 24 defined task(s), 5 task(s) to run -2014-11-03T15:48:47Z [2708] INFO In database '_system': state prod/standalone/upgrade, tasks updateUserModel, createStatistics, upgradeClusterPlan, setupQueues, setupJobs -2014-11-03T15:48:48Z [2708] INFO In database '_system': upgrade successfully finished -2014-11-03T15:48:48Z [2708] INFO database upgrade passed -``` - -Please check the output the `--upgrade` run. It may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved, you can start ArangoDB 2.3 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. 
If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. - diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading24.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading24.md deleted file mode 100644 index 1572c8b37205..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading24.md +++ /dev/null @@ -1,116 +0,0 @@ -Upgrading to ArangoDB 2.4 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.4. Please be sure that you have checked the list of [changes in 2.4](../../ReleaseNotes/UpgradingChanges24.md) -before upgrading. - -Please note first that a database directory used with ArangoDB 2.4 -cannot be used with earlier versions (e.g. ArangoDB 2.3) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.4 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. - -The output will then look like this: - -``` -2014-12-22T12:02:28Z [12001] ERROR In database '_system': Database directory version (20302) is lower than current version (20400). -2014-12-22T12:02:28Z [12001] ERROR In database '_system': ---------------------------------------------------------------------- -2014-12-22T12:02:28Z [12001] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2014-12-22T12:02:28Z [12001] ERROR In database '_system': If this is what you wanted to do, please restart with the -2014-12-22T12:02:28Z [12001] ERROR In database '_system': --upgrade -2014-12-22T12:02:28Z [12001] ERROR In database '_system': option to upgrade the data in the database directory. -2014-12-22T12:02:28Z [12001] ERROR In database '_system': Normally you can use the control script to upgrade your database -2014-12-22T12:02:28Z [12001] ERROR In database '_system': /etc/init.d/arangodb stop -2014-12-22T12:02:28Z [12001] ERROR In database '_system': /etc/init.d/arangodb upgrade -2014-12-22T12:02:28Z [12001] ERROR In database '_system': /etc/init.d/arangodb start -2014-12-22T12:02:28Z [12001] ERROR In database '_system': ---------------------------------------------------------------------- -2014-12-22T12:02:28Z [12001] FATAL Database '_system' needs upgrade. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.4 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. 
This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option: - - unix> arangod data --upgrade - -where `data` is ArangoDB's main data directory. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. - -The last line of the output should look like this: -``` -2014-12-22T12:03:31Z [12026] INFO database upgrade passed -``` - -Please check the full output the `--upgrade` run. Upgrading may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved manually, you can start ArangoDB 2.4 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. 
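For illustration, a minimal sketch of that arangosh invocation, assuming the dispatcher is reachable on `tcp://127.0.0.1:8529` and that `root` / `mySecret` are placeholder credentials valid for the cluster:

```js
// Connect arangosh to the dispatcher first, e.g.:
//   arangosh --server.endpoint tcp://127.0.0.1:8529
// Then trigger the upgrade and relaunch of the whole cluster:
require("org/arangodb/cluster").Upgrade("root", "mySecret");
```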
- diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading25.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading25.md deleted file mode 100644 index 69fbb76cc89b..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading25.md +++ /dev/null @@ -1,135 +0,0 @@ -Upgrading to ArangoDB 2.5 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.5. Please be sure that you have checked the list of [changes in 2.5](../../ReleaseNotes/UpgradingChanges25.md) -before upgrading. - -Please note first that a database directory used with ArangoDB 2.5 -cannot be used with earlier versions (e.g. ArangoDB 2.4) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -In 2.5 we have also changed the paths for Foxx applications. -Please also make sure that you have a backup of all Foxx apps in your `javascript.app-path` and `javascript.dev-app-path`. -It is sufficient to have the source files for Foxx somewhere else so you can reinstall them on error. -To check that everything has worked during upgrade you could use the web-interface Applications tab or - -``` -unix> foxx-manager list -``` - -for all your databases. -The listed apps should be identical before and after the upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.5 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. - -The output will then look like this: - -``` -2015-02-17T09:43:11Z [8302] ERROR In database '_system': Database directory version (20401) is lower than current version (20500). -2015-02-17T09:43:11Z [8302] ERROR In database '_system': ---------------------------------------------------------------------- -2015-02-17T09:43:11Z [8302] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2015-02-17T09:43:11Z [8302] ERROR In database '_system': If this is what you wanted to do, please restart with the -2015-02-17T09:43:11Z [8302] ERROR In database '_system': --upgrade -2015-02-17T09:43:11Z [8302] ERROR In database '_system': option to upgrade the data in the database directory. -2015-02-17T09:43:11Z [8302] ERROR In database '_system': Normally you can use the control script to upgrade your database -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb stop -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb upgrade -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb start -2015-02-17T09:43:11Z [8302] ERROR In database '_system': ---------------------------------------------------------------------- -2015-02-17T09:43:11Z [8302] FATAL Database '_system' needs upgrade. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.5 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option. -Note: We have changed Foxx folder structure and implemented an upgrade task to move your -applications to the new structure. 
In order to tell this upgrade task to also move your -development Foxx apps please make sure you give the dev-app-path as well. -If you have not used development mode for Foxx apps you can drop the -`--javascript.dev-app-path`. -It is only possible to upgrade one dev-app-path together with one data folder. - - unix> arangod data --upgrade --javascript.dev-app-path devapps - -where `data` is ArangoDB's main data directory -and `devapps` is the directory where you develop Foxx apps. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. - -The last line of the output should look like this: -``` -2014-12-22T12:03:31Z [12026] INFO database upgrade passed -``` - -Please check the full output the `--upgrade` run. Upgrading may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved manually, you can start ArangoDB 2.5 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. 
- diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading26.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading26.md deleted file mode 100644 index 8c767dd25938..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading26.md +++ /dev/null @@ -1,112 +0,0 @@ -Upgrading to ArangoDB 2.6 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.6. Please be sure that you have checked the list of [changes in 2.6](../../ReleaseNotes/UpgradingChanges26.md) -before upgrading. - -Please note first that a database directory used with ArangoDB 2.6 -cannot be used with earlier versions (e.g. ArangoDB 2.5) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.6 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. - -The output will then look like this: - -``` -2015-02-17T09:43:11Z [8302] ERROR In database '_system': Database directory version (20501) is lower than current version (20600). -2015-02-17T09:43:11Z [8302] ERROR In database '_system': ---------------------------------------------------------------------- -2015-02-17T09:43:11Z [8302] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2015-02-17T09:43:11Z [8302] ERROR In database '_system': If this is what you wanted to do, please restart with the -2015-02-17T09:43:11Z [8302] ERROR In database '_system': --upgrade -2015-02-17T09:43:11Z [8302] ERROR In database '_system': option to upgrade the data in the database directory. -2015-02-17T09:43:11Z [8302] ERROR In database '_system': Normally you can use the control script to upgrade your database -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb stop -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb upgrade -2015-02-17T09:43:11Z [8302] ERROR In database '_system': /etc/init.d/arangodb start -2015-02-17T09:43:11Z [8302] ERROR In database '_system': ---------------------------------------------------------------------- -2015-02-17T09:43:11Z [8302] FATAL Database '_system' needs upgrade. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.6 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. 
- -The last line of the output should look like this: -``` -2014-12-22T12:03:31Z [12026] INFO database upgrade passed -``` - -Please check the full output the `--upgrade` run. Upgrading may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved manually, you can start ArangoDB 2.6 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. - diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading28.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading28.md deleted file mode 100644 index 29ace64af77c..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading28.md +++ /dev/null @@ -1,169 +0,0 @@ -Upgrading to ArangoDB 2.8 -========================= - -Please read the following sections if you upgrade from a previous version to -ArangoDB 2.8. Please be sure that you have checked the list of [changes in 2.8](../../ReleaseNotes/UpgradingChanges28.md) -before upgrading. - -Please note first that a database directory used with ArangoDB 2.8 -cannot be used with earlier versions (e.g. ArangoDB 2.7) any -more. Upgrading a database directory cannot be reverted. Therefore -please make sure to create a full backup of your existing ArangoDB -installation before performing an upgrade. - -Database Directory Version Check and Upgrade --------------------------------------------- - -ArangoDB will perform a database version check at startup. When ArangoDB 2.8 -encounters a database created with earlier versions of ArangoDB, it will refuse -to start. This is intentional. 
- -The output will then look like this: - -``` -2015-12-04T17:11:17Z [31432] ERROR In database '_system': Database directory version (20702) is lower than current version (20800). -2015-12-04T17:11:17Z [31432] ERROR In database '_system': ---------------------------------------------------------------------- -2015-12-04T17:11:17Z [31432] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary. -2015-12-04T17:11:17Z [31432] ERROR In database '_system': If this is what you wanted to do, please restart with the -2015-12-04T17:11:17Z [31432] ERROR In database '_system': --upgrade -2015-12-04T17:11:17Z [31432] ERROR In database '_system': option to upgrade the data in the database directory. -2015-12-04T17:11:17Z [31432] ERROR In database '_system': Normally you can use the control script to upgrade your database -2015-12-04T17:11:17Z [31432] ERROR In database '_system': /etc/init.d/arangodb stop -2015-12-04T17:11:17Z [31432] ERROR In database '_system': /etc/init.d/arangodb upgrade -2015-12-04T17:11:17Z [31432] ERROR In database '_system': /etc/init.d/arangodb start -2015-12-04T17:11:17Z [31432] ERROR In database '_system': ---------------------------------------------------------------------- -2015-12-04T17:11:17Z [31432] FATAL Database '_system' needs upgrade. Please start the server with the --upgrade option -``` - -To make ArangoDB 2.8 start with a database directory created with an earlier -ArangoDB version, you may need to invoke the upgrade procedure once. This can -be done by running ArangoDB from the command line and supplying the `--upgrade` -option. - -Note: here the same database should be specified that is also specified when -arangod is started regularly. Please do not run the `--upgrade` command on each -individual database subfolder (named `database-`). - -For example, if you regularly start your ArangoDB server with - - unix> arangod mydatabasefolder - -then running - - unix> arangod mydatabasefolder --upgrade - -will perform the upgrade for the whole ArangoDB instance, including all of its -databases. - -Starting with `--upgrade` will run a database version check and perform any -necessary migrations. As usual, you should create a backup of your database -directory before performing the upgrade. - -The last line of the output should look like this: -``` -2015-12-04T17:12:15Z [31558] INFO database upgrade passed -``` - -Please check the full output the `--upgrade` run. Upgrading may produce errors, which need -to be fixed before ArangoDB can be used properly. If no errors are present or -they have been resolved manually, you can start ArangoDB 2.8 regularly. - -Upgrading a cluster planned in the web interface ------------------------------------------------- - -A cluster of ArangoDB instances has to be upgraded as well. This -involves upgrading all ArangoDB instances in the cluster, as well as -running the version check on the whole running cluster in the end. - -We have tried to make this procedure as painless and convenient for you. -We assume that you planned, launched and administrated a cluster using the -graphical front end in your browser. The upgrade procedure is then as -follows: - - 1. First shut down your cluster using the graphical front end as - usual. - - 2. Then upgrade all dispatcher instances on all machines in your - cluster using the version check as described above and restart them. - - 3. 
Now open the cluster dash board in your browser by pointing it to - the same dispatcher that you used to plan and launch the cluster in - the graphical front end. In addition to the usual buttons - "Relaunch", "Edit cluster plan" and "Delete cluster plan" you will - see another button marked "Upgrade and relaunch cluster". - - 4. Hit this button, your cluster will be upgraded and launched and - all is done for you behind the scenes. If all goes well, you will - see the usual cluster dash board after a few seconds. If there is - an error, you have to inspect the log files of your cluster - ArangoDB instances. Please let us know if you run into problems. - -There is an alternative way using the `ArangoDB` shell. Instead of -steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher -that you have used to plan and launch the cluster using the option -``--server.endpoint``, and execute - - arangosh> require("org/arangodb/cluster").Upgrade("root",""); - -This upgrades the cluster and launches it, exactly as with the button -above in the graphical front end. You have to replace `"root"` with -a user name and `""` with a password that is valid for authentication -with the cluster. - -Upgrading Foxx apps generated by ArangoDB 2.7 and earlier ---------------------------------------------------------- - -The implementation of the `require` function used to import modules in -ArangoDB and Foxx [has changed](../../ReleaseNotes/UpgradingChanges28.md#module-resolution) -in order to improve compatibility with Node.js modules. - -Given an app/service with the following layout: - -* manifest.json -* controllers/ - * todos.js -* models/ - * todo.js -* repositories/ - * todos.js -* node_modules/ - * models/ - * todo.js - -The file `controllers/todos.js` would previously contain the following -`require` calls: - -```js -var _ = require('underscore'); -var joi = require('joi'); -var Foxx = require('org/arangodb/foxx'); -var ArangoError = require('org/arangodb').ArangoError; -var Todos = require('repositories/todos'); // <-- ! -var Todo = require('models/todo'); // <-- ! -``` - -The require paths `repositories/todos` and `models/todo` were previously -resolved locally as relative to the app root. - -Starting with 2.8 these paths would instead be resolved as relative to -the `node_modules` folder or the global ArangoDB module paths before being -resolved locally as a fallback. - -In the given example layout the app would break in 2.8 because the module -name `models/todo` would always resolve to `node_modules/models/todo.js` -(which previously would have been ignored) instead of the local `models/todo.js`. - -In order to make sure the app still works in 2.8, the require calls in -`controllers/todos.js` would need to be adjusted to look like this: - -```js -var _ = require('underscore'); -var joi = require('joi'); -var Foxx = require('org/arangodb/foxx'); -var ArangoError = require('org/arangodb').ArangoError; -var Todos = require('../repositories/todos'); // <-- ! -var Todo = require('../models/todo'); // <-- ! -``` - -Note that the old "global" style require calls may still work in 2.8 but -may break unexpectedly if modules with matching names are installed globally. 
diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading30.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading30.md
deleted file mode 100644
index b6fd5b8922dd..000000000000
--- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading30.md
+++ /dev/null
@@ -1,128 +0,0 @@
-Upgrading to ArangoDB 3.0
-=========================
-
-Please read the following sections if you upgrade from a previous
-version to ArangoDB 3.0. Please be sure that you have checked the list
-of [changes in 3.0](../../ReleaseNotes/UpgradingChanges30.md) before
-upgrading.
-
-Migrating databases and collections from ArangoDB 2.8 to 3.0
-------------------------------------------------------------
-
-ArangoDB 3.0 does not provide an automatic update mechanism for database
-directories created with the 2.x branches of ArangoDB.
-
-In order to migrate data from ArangoDB 2.8 (or an older 2.x version) into
-ArangoDB 3.0, it is necessary to export the data from 2.8 using `arangodump`,
-and then import the dump into a fresh ArangoDB 3.0 with `arangorestore`.
-
-To do this, first run the 2.8 version of `arangodump` to export the database
-data into a directory. `arangodump` will dump the `_system` database by default.
-In order to make it dump multiple databases, it needs to be invoked once per
-source database, e.g.
-
-```
-# in 2.8
-arangodump --server.database _system --output-directory dump-system
-arangodump --server.database mydb --output-directory dump-mydb
-...
-```
-
-That will produce a dump directory for each database that `arangodump` is
-called for. If the server has authentication turned on, it may be necessary to
-provide the required credentials when invoking `arangodump`, e.g.
-
-```
-arangodump --server.database _system --server.username myuser --server.password mypasswd --output-directory dump-system
-```
-
-The dumps produced by `arangodump` can now be imported into ArangoDB 3.0 using
-the 3.0 version of `arangorestore`:
-
-```
-# in 3.0
-arangorestore --server.database _system --input-directory dump-system
-arangorestore --server.database mydb --input-directory dump-mydb
-...
-```
-
-arangorestore will by default fail if the target database does not exist. It can
-be told to create it automatically using the option `--create-database true`:
-
-```
-arangorestore --server.database mydb --create-database true --input-directory dump-mydb
-```
-
-And again it may be required to provide access credentials when invoking
-`arangorestore`:
-
-```
-arangorestore --server.database mydb --create-database true --server.username myuser --server.password mypasswd --input-directory dump-mydb
-```
-
-Please note that the version of dump/restore should match the server version, i.e.
-it is required to dump the original data with the 2.8 version of `arangodump`
-and restore it with the 3.0 version of `arangorestore`.
-
-After that the 3.0 instance of ArangoDB will contain the databases and collections
-that were present in the 2.8 instance.
-
-Adjusting authentication info
------------------------------
-
-Authentication information was stored per database in ArangoDB 2.8, meaning there
-could be different users and access credentials per database. In 3.0, the users are
-stored in a central location in the `_system` database. To use the same user setup
-as in 2.8, it may be required to create extra users and/or adjust their permissions.
- -In order to do that, please connect to the 3.0 instance with an ArangoShell (this -will connect to the `_system` database by default): - -``` -arangosh --server.username myuser --server.password mypasswd -``` - -Use the following commands to create a new user with some password and grant them -access to a specific database - -``` -require("@arangodb/users").save(username, password, true); -require("@arangodb/users").grantDatabase(username, databaseName, "rw"); -``` - -For example, to create a user `myuser` with password `mypasswd` and give them -access to databases `mydb1` and `mydb2`, the commands would look as follows: - -``` -require("@arangodb/users").save("myuser", "mypasswd", true); -require("@arangodb/users").grantDatabase("myuser", "mydb1", "rw"); -require("@arangodb/users").grantDatabase("myuser", "mydb2", "rw"); -``` - -Existing users can also be updated, removed or listed using the following -commands: - -``` -/* update user myuser with password mypasswd */ -require("@arangodb/users").update("myuser", "mypasswd", true); - -/* remove user myuser */ -require("@arangodb/users").remove("myuser"); - -/* list all users */ -require("@arangodb/users").all(); -``` - -Foxx applications ------------------ - -The dump/restore procedure described above will not export and re-import Foxx applications. -In order to move these from 2.8 to 3.0, Foxx applications should be exported as zip files -via the 2.8 web interface. - -The zip files can then be uploaded in the "Services" section in the ArangoDB 3.0 web interface. -Applications may need to be adjusted manually to run in 3.0. Please consult the -[migration guide for Foxx apps](../../Foxx/Migrating2x/README.md). - -An alternative way of moving Foxx apps into 3.0 is to copy the source directory of a 2.8 Foxx -application manually into the 3.0 Foxx apps directory for the target database (which is normally -`/var/lib/arangodb3-apps/_db//` but the exact location is platform-specific). diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading31.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading31.md deleted file mode 100644 index 453544fc92e6..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading31.md +++ /dev/null @@ -1,6 +0,0 @@ -Upgrading to ArangoDB 3.1 -========================= - -Please be sure that you have checked the list of [changes in 3.1](../../ReleaseNotes/UpgradingChanges31.md) -before upgrading. - diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading32.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading32.md deleted file mode 100644 index 1999932733e5..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading32.md +++ /dev/null @@ -1,23 +0,0 @@ -Upgrading to ArangoDB 3.2 -========================= - -Please read the following sections if you upgrade from a previous -version to ArangoDB 3.2. Also, please, be sure that you have checked the list -of [incompatible changes in 3.2](../../ReleaseNotes/UpgradingChanges32.md) before -upgrading. - -Switching the storage engine ----------------------------- - -In order to use a different storage engine with an existing data directory, -it is required to first create a logical backup of the data using *arangodump*. -That backup should be created before the upgrade to 3.2. - -After that, the ArangoDB installation can be upgraded and stopped. 
The server -should then be restarted with the desired storage engine selected (this can be -done by setting the option *--server.storage-engine*) and using a non-existing -data directory. This will start the server with the selected storage engine -but with no data. - -When the server is up and running, the data from the logical backup can be -re-imported using *arangorestore*. diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading33.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading33.md deleted file mode 100644 index f3e174eb383c..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading33.md +++ /dev/null @@ -1,6 +0,0 @@ -Upgrading to ArangoDB 3.3 -========================= - -Please be sure that you have checked the list of [incompatible changes in 3.3](../../ReleaseNotes/UpgradingChanges33.md) -before upgrading. - diff --git a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading34.md b/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading34.md deleted file mode 100644 index bfbee60ef9ac..000000000000 --- a/Documentation/Books/Manual/Upgrading/VersionSpecific/Upgrading34.md +++ /dev/null @@ -1,32 +0,0 @@ -Upgrading to ArangoDB 3.4 -========================= - -Please be sure that you have checked the list of -[incompatible changes in 3.4](../../ReleaseNotes/UpgradingChanges34.md) -before upgrading. - -Upon upgrading from 3.3 to 3.4, the following storage engine-specific data conversion tasks -will be executed: - -- **MMFiles storage engine:** - - All collection datafiles will be rewritten into a - new data format. This data format is required in order to support using the collections - in [ArangoSearch Views](../../Views/ArangoSearch/README.md) introduced in ArangoDB 3.4. - - The conversion will read each datafile sequentially and write out a new datafile in the - new format sequentially. This means the disk will be involved, but both reading and - writing are done in a sequential fashion. Preliminary tests have shown that it will need - at most 2-3 times as long as it takes to copy the database directory. - -- **RocksDB storage engine:** - - All existing geo indexes will be rewritten into a new - data format. This data format is required for using the indexes with the improved - [geo index feature](../../Indexing/Geo.md) in ArangoDB 3.4. - - Preliminary tests have shown that the conversion can process about 500K to 1M geo index - entries per second on commodity hardware. - -If you upgrade without any existing data (new blank data folder), then none of these tasks -needs to be run because the datafiles will be created using the new format already. diff --git a/Documentation/Books/Manual/Views/ArangoSearch/Analyzers.md b/Documentation/Books/Manual/Views/ArangoSearch/Analyzers.md deleted file mode 100644 index b1d1496b044e..000000000000 --- a/Documentation/Books/Manual/Views/ArangoSearch/Analyzers.md +++ /dev/null @@ -1,7 +0,0 @@ -ArangoSearch Analyzers -====================== - -To simplify query syntax ArangoSearch provides a concept of named analyzers -which are merely aliases for type+configuration of IResearch analyzers. See -the [Analyzers](../../Analyzers/README.md) for a description of their usage -and management. 
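As a hedged illustration of what a "named analyzer" means in practice (the collection, view and attribute names below are hypothetical), the same analyzer name, e.g. the built-in `text_en` analyzer, can be referenced both in a view link and in AQL:

```js
// Reference the named analyzer in a view link ...
db._view("exampleView").properties({
  links: {
    exampleCollection: {
      fields: { body: { analyzers: ["text_en"] } }
    }
  }
});

// ... and pass the same name to AQL functions, e.g. to inspect tokenization:
db._query(`RETURN TOKENS("a quick brown fox", "text_en")`).toArray();
```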
diff --git a/Documentation/Books/Manual/Views/ArangoSearch/DetailedOverview.md b/Documentation/Books/Manual/Views/ArangoSearch/DetailedOverview.md deleted file mode 100644 index 07d13cd0ea3d..000000000000 --- a/Documentation/Books/Manual/Views/ArangoSearch/DetailedOverview.md +++ /dev/null @@ -1,249 +0,0 @@ -# Detailed overview of ArangoSearch Views - -ArangoSearch is a powerful fulltext search component with additional -functionality, supported via the *text* analyzer and *tfidf* / *bm25* -[scorers](Scorers.md), without impact on performance when specifying documents -from different collections or filtering on multiple document attributes. - -## View datasource - -Search functionality is exposed to ArangoDB via the view API for views of type -`arangosearch`. The ArangoSearch View is merely an identity transformation -applied onto documents stored in linked collections of the same ArangoDB -database. In plain terms an ArangoSearch View only allows filtering and sorting -of documents located in collections of the same database. The matching documents -themselves are returned as-is from their corresponding collections. - -## Links to ArangoDB collections - -A concept of an ArangoDB collection 'link' is introduced to allow specifying -which ArangoDB collections a given ArangoSearch View should query for documents -and how these documents should be queried. - -An ArangoSearch Link is a uni-directional connection from an ArangoDB collection -to an ArangoSearch View describing how data coming from the said collection -should be made available in the given view. Each ArangoSearch Link in an -ArangoSearch view is uniquely identified by the name of the ArangoDB collection -it links to. An ArangoSearch View may have zero or more links, each to a -distinct ArangoDB collection. Similarly an ArangoDB collection may be referenced -via links by zero or more distinct ArangoSearch Views. In other words, any given -ArangoSearch View may be linked to any given ArangoDB collection of the same -database with zero or one link. However, any ArangoSearch View may be linked to -multiple distinct ArangoDB collections and similarly any ArangoDB collection may -be referenced by multiple ArangoSearch Views. - -To configure an ArangoSearch View for consideration of documents from a given -ArangoDB collection a link definition must be added to the properties of the -said ArangoSearch View defining the link parameters as per the section -[View definition/modification](#view-definitionmodification). - -## Index - -Inverted Index is the heart of ArangoSearch. The index consists of several -independent segments and the index segment itself is meant to be treated as a -standalone index. - -## Analyzers - -To simplify query syntax ArangoSearch provides a concept of -[named analyzers](Analyzers.md) which are merely aliases for type+configuration -of IResearch analyzers. - - -## View definition/modification - -An ArangoSearch View is configured via an object containing a set of -view-specific configuration directives and a map of link-specific configuration -directives. 
- -During view creation the following directives apply: - -- **name** (_required_; type: `string`): the view name -- **type** (_required_; type: `string`): the value `"arangosearch"` -- any of the directives from the section [View properties](#view-properties) - -During view modification the following directives apply: - -- **links** (_optional_; type: `object`): - a mapping of `collection-name/collection-identifier` to one of: - - link creation - link definition as per the section [Link properties](#link-properties) - - link removal - JSON keyword *null* (i.e. nullify a link if present)
-- any of the directives from the section [View properties](#view-properties) - -## View properties -The following terminology from ArangoSearch architecture is used to understand -view properties assignment of its type: - -The index consists of several independent segments and the index **segment** -itself is meant to be treated as a standalone index. **Commit** is meant to be -treated as the procedure of accumulating processed data creating new index -segments. **Consolidation** is meant to be treated as the procedure of joining -multiple index segments into a bigger one and removing garbage documents (e.g. -deleted from a collection). **Cleanup** is meant to be treated as the procedure -of removing unused segments after release of internal resources. - -- **cleanupIntervalStep** (_optional_; type: `integer`; default: `10`; to - disable use: `0`) - - ArangoSearch waits at least this many commits between removing unused files in - its data directory for the case where the consolidation policies merge - segments often (i.e. a lot of commit+consolidate). A lower value will cause a - lot of disk space to be wasted for the case where the consolidation policies - rarely merge segments (i.e. few inserts/deletes). A higher value will impact - performance without any added benefits. - - > With every **commit** or **consolidate** operation a new state of the view - > internal data-structures is created on disk. Old states/snapshots are - > released once there are no longer any users remaining. However, the files - > for the released states/snapshots are left on disk, and only removed by - > "cleanup" operation. - -- **consolidationIntervalMsec** (_optional_; type: `integer`; default: `60000`; - to disable use: `0`) - - ArangoSearch waits _at least_ this many milliseconds between committing view - data store changes and making documents visible to queries. A lower value - will cause the view not to account for them, (until commit), and memory usage - would continue to grow for the case where there are a few inserts/updates. A - higher value will impact performance and waste disk space for each commit call - without any added benefits. - - > For data retrieval ArangoSearch Views follow the concept of - > "eventually-consistent", i.e. eventually all the data in ArangoDB will be - > matched by corresponding query expressions. The concept of an ArangoSearch - > View "commit" operation is introduced to control the upper-bound on the time - > until document addition/removals are actually reflected by corresponding - > query expressions. Once a **commit** operation is complete, all documents - > added/removed prior to the start of the **commit** operation will be - > reflected by queries invoked in subsequent ArangoDB transactions, while - > in-progress ArangoDB transactions will still continue to return a - > repeatable-read state. - -ArangoSearch performs operations in its index based on numerous writer -objects that are mapped to processed segments. In order to control memory that -is used by these writers (in terms of "writers pool") one can use -`writebuffer*` properties of a view. - -- **writebufferIdle** (_optional_; type: `integer`; default: `64`; - to disable use: `0`) - - Maximum number of writers (segments) cached in the pool. - -- **writebufferActive** (_optional_; type: `integer`; default: `0`; - to disable use: `0`) - - Maximum number of concurrent active writers (segments) that perform a transaction. - Other writers (segments) wait till current active writers (segments) finish. 
- -- **writebufferSizeMax** (_optional_; type: `integer`; default: `33554432`; - to disable use: `0`) - - Maximum memory byte size per writer (segment) before a writer (segment) flush is - triggered. `0` value turns off this limit for any writer (buffer) and data will - be flushed periodically based on the - [value defined for the flush thread](../../Programs/Arangod/Server.md#data-source-flush-synchronization) - (ArangoDB server startup option). `0` value should be used carefully due to high - potential memory consumption. - -- **consolidationPolicy** (_optional_; type: `object`; default: `{}`) - - The consolidation policy to apply for selecting data store segment merge - candidates. - - > With each ArangoDB transaction that inserts documents, one or more - > ArangoSearch internal segments gets created. Similarly, for removed - > documents the segments containing such documents will have these documents - > marked as "deleted". Over time this approach causes a lot of small and - > sparse segments to be created. A **consolidation** operation selects one or - > more segments and copies all of their valid documents into a single new - > segment, thereby allowing the search algorithm to perform more optimally and - > for extra file handles to be released once old segments are no longer used. - - - **type** (_optional_; type: `string`; default: `"bytes_accum"`) - - The segment candidates for the "consolidation" operation are selected based - upon several possible configurable formulas as defined by their types. - The currently supported types are: - - - **bytes_accum**: Consolidation is performed based on current memory consumption - of segments and `threshold` property value. - - **tier**: Consolidate based on segment byte size and live document count - as dictated by the customization attributes. - -### `consolidationPolicy` properties for `bytes_accum` type - - **threshold** (_optional_; type: `float`; default: `0.1`) - - Defines threshold value of `[0.0, 1.0]` possible range. Consolidation is - performed on segments which accumulated size in bytes is less than all - segments' byte size multiplied by the `threshold`; i.e. the following formula - is applied for each segment: - `{threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`. - -### `consolidationPolicy` properties for `tier` type - - - **segmentsMin** (_optional_; type: `integer`; default: `1`) - - The minimum number of segments that will be evaluated as candidates for consolidation. - - - **segmentsMax** (_optional_; type: `integer`; default: `10`) - - The maximum number of segments that will be evaluated as candidates for consolidation. - - - **segmentsBytesMax** (_optional_; type: `integer`; default: `5368709120`) - - Maximum allowed size of all consolidated segments in bytes. - - - **segmentsBytesFloor** (_optional_; type: `integer`; default: `2097152`) - - Defines the value (in bytes) to treat all smaller segments as equal for consolidation - selection. - - - **lookahead** (_optional_; type: `integer`; default: `18446744073709552000`) - - The number of additionally searched tiers except initially chosen candidates based on - `segmentsMin`, `segmentsMax`, `segmentsBytesMax`, `segmentsBytesFloor` with - respect to defined values. Default value is treated as searching among all existing - segments. 
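As a rough sketch of how the properties described in this section might be adjusted on an existing view (the view name is hypothetical, the values shown are simply the documented defaults, and the usual partial-update semantics of `properties()` are assumed):

```js
// Partially update the view configuration discussed above:
db._view("exampleView").properties({
  cleanupIntervalStep: 10,           // commits to wait between cleanup runs
  consolidationIntervalMsec: 60000,  // upper bound until changes become visible
  consolidationPolicy: {
    type: "tier",                    // or "bytes_accum" with a `threshold`
    segmentsMin: 1,
    segmentsMax: 10,
    segmentsBytesMax: 5368709120,
    segmentsBytesFloor: 2097152
  }
});
```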
- -## Link properties - -- **analyzers** (_optional_; type: `array`; subtype: `string`; default: `[ - 'identity' ]`) - - A list of analyzers, by name as defined via the [Analyzers](Analyzers.md), - that should be applied to values of processed document attributes. - -- **fields** (_optional_; type: `object`; default: `{}`) - - An object `{attribute-name: [Link properties]}` of fields that should be - processed at each level of the document. Each key specifies the document - attribute to be processed. Note that the value of `includeAllFields` is also - consulted when selecting fields to be processed. Each value specifies the - [Link properties](#link-properties) directives to be used when processing the - specified field, a Link properties value of `{}` denotes inheritance of all - (except `fields`) directives from the current level. - -- **includeAllFields** (_optional_; type: `boolean`; default: `false`) - - If set to `true`, then process all document attributes. Otherwise, only - consider attributes mentioned in `fields`. Attributes not explicitly - specified in `fields` will be processed with default link properties, i.e. - `{}`. - -- **trackListPositions** (_optional_; type: `boolean`; default: `false`) - - If set to `true`, then for array values track the value position in arrays. - E.g., when querying for the input `{ attr: [ 'valueX', 'valueY', 'valueZ' ] - }`, the user must specify: `doc.attr[1] == 'valueY'`. Otherwise, all values in - an array are treated as equal alternatives. E.g., when querying for the input - `{ attr: [ 'valueX', 'valueY', 'valueZ' ] }`, the user must specify: `doc.attr - == 'valueY'`. - -- **storeValues** (_optional_; type: `string`; default: `"none"`) - - This property controls how the view should keep track of the attribute values. - Valid values are: - - - **none**: Do not store values with the view. - - **id**: Store information about value presence to allow use of the - `EXISTS()` function. diff --git a/Documentation/Books/Manual/Views/ArangoSearch/GettingStarted.md b/Documentation/Books/Manual/Views/ArangoSearch/GettingStarted.md deleted file mode 100644 index e116138a3433..000000000000 --- a/Documentation/Books/Manual/Views/ArangoSearch/GettingStarted.md +++ /dev/null @@ -1,139 +0,0 @@ -# Getting started with ArangoSearch Views - -## The DDL configuration - -[DDL](https://en.wikipedia.org/wiki/Data_definition_language) is a data -definition language or data description language for defining data structures, -especially database schemas. - -All DDL operations on Views can be done via JavaScript or REST calls. The DDL -syntax follows the well established ArangoDB guidelines and thus is very -similar between the [JavaScript interface for views](../../DataModeling/Views/README.md) -and the [HTTP interface for views](../../../HTTP/Views/index.html).This article -uses the JavaScript syntax. 
- -Assume the following collections were initially defined in a database using -the following commands: - -```js -c0 = db._create("ExampleCollection0"); -c1 = db._create("ExampleCollection1"); - -c0.save({ i: 0, name: "full", text: "是一个 多模 型数 据库" }); -c0.save({ i: 1, name: "half", text: "是一个 多模" }); -c0.save({ i: 2, name: "other half", text: "型数 据库" }); -c0.save({ i: 3, name: "quarter", text: "是一" }); - -c1.save({ a: "foo", b: "bar", i: 4 }); -c1.save({ a: "foo", b: "baz", i: 5 }); -c1.save({ a: "bar", b: "foo", i: 6 }); -c1.save({ a: "baz", b: "foo", i: 7 }); -``` - -## Creating a View (with default parameters) - -```js -v0 = db._createView("ExampleView", "arangosearch", {}); -``` - -## Linking created View with a collection and adding indexing parameters - -```js -v0 = db._view("ExampleView"); -v0.properties({ - links: { - /* collection Link 0 with additional custom configuration: */ - 'ExampleCollection0': - { - /* examine fields of all linked collections, - using default configuration: */ - includeAllFields: true, - fields: - { - /* a field to apply custom configuration - that will index English text: */ - name: - { - analyzers: ["text_en"] - }, - /* another field to apply custom configuration - that will index Chinese text: */ - text: - { - analyzers: ["text_zh"] - } - } - }, - /* collection Link 1 with custom configuration: */ - 'ExampleCollection1': - { - /* examine all fields using default configuration: */ - includeAllFields: true, - fields: - { - a: - { - /* a field to apply custom configuration - that will index English text: */ - analyzers: ["text_en"] - } - } - } - } - } -); -``` - -## Query data using created View with linked collections - -```js -db._query(`FOR doc IN ExampleView - SEARCH PHRASE(doc.text, '型数 据库', 'text_zh') OR STARTS_WITH(doc.b, 'ba') - SORT TFIDF(doc) DESC - RETURN doc`); -``` - -## Examine query result - -Result of the latter query will include all documents from both linked -collections that include `多模 型数` phrase in Chinese at any part of `text` -property or `b` property in English that starts with `ba`. Additionally, -descendant sorting using [TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) -will be applied during a search: - -```json -[ - { - "_key" : "120", - "_id" : "ExampleCollection0/120", - "_rev" : "_XPoMzCi--_", - "i" : 0, - "name" : "full", - "text" : "是一个 多模 型数 据库" - }, - { - "_key" : "124", - "_id" : "ExampleCollection0/124", - "_rev" : "_XPoMzCq--_", - "i" : 2, - "name" : "other half", - "text" : "型数 据库" - }, - { - "_key" : "128", - "_id" : "ExampleCollection1/128", - "_rev" : "_XPoMzCu--_", - "a" : "foo", - "b" : "bar", - "c" : 0 - }, - { - "_key" : "130", - "_id" : "ExampleCollection1/130", - "_rev" : "_XPoMzCy--_", - "a" : "foo", - "b" : "baz", - "c" : 1 - } -] -``` diff --git a/Documentation/Books/Manual/Views/ArangoSearch/README.md b/Documentation/Books/Manual/Views/ArangoSearch/README.md deleted file mode 100644 index a440109e735b..000000000000 --- a/Documentation/Books/Manual/Views/ArangoSearch/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# ArangoSearch Views powered by IResearch - -## What is ArangoSearch - -ArangoSearch is a natively integrated AQL extension making use of the -[IResearch library](https://github.com/iresearch-toolkit/iresearch). 
-
-- join documents located in different collections to one result list
-- filter documents based on AQL boolean expressions and functions
-- sort the result set based on how closely each document matched the filter
-
-ArangoSearch also introduces a concept of value "analysis": a given value is broken up
-into a set of sub-values, internally tied together by metadata that influences both
-the search and sort stages, in order to provide the most appropriate match for the
-specified conditions, similar to queries to web search engines.
-
-In plain terms this means a user can for example:
-
-- request documents where the `body` attribute best matches `a quick brown fox`
-- request documents where the `dna` attribute best matches a DNA sub sequence
-- request documents where the `name` attribute best matches gender
-- etc. (via custom analyzers)
-
-See the [Analyzers](../../Analyzers/README.md) chapter for a detailed description of
-the usage and management of custom analyzers.
-
-### The IResearch Library
-
-IResearch is a cross-platform open source indexing and searching engine written in C++,
-optimized for speed and memory footprint, with source available from:
-https://github.com/iresearch-toolkit/iresearch
-
-IResearch is the framework for indexing, filtering and sorting of data.
-The indexing stage can treat each data item as an atom or use custom "analyzers"
-to break the data item into sub-atomic pieces tied together with internally
-tracked metadata.
-
-The IResearch framework in general can be further extended at runtime with
-custom implementations of analyzers (used during the indexing and searching
-stages) and scorers (used during the sorting stage), allowing full control over
-the behavior of the engine.
-
-## Using ArangoSearch Views
-
-To get more familiar with ArangoSearch usage, you may start with the short
-[Getting Started](GettingStarted.md) guide and then explore the details of
-ArangoSearch in the [Detailed Overview](DetailedOverview.md),
-[Analyzers](Analyzers.md) and
-[Scorers](Scorers.md) topics.
diff --git a/Documentation/Books/Manual/Views/ArangoSearch/Scorers.md b/Documentation/Books/Manual/Views/ArangoSearch/Scorers.md
deleted file mode 100644
index cc95918de166..000000000000
--- a/Documentation/Books/Manual/Views/ArangoSearch/Scorers.md
+++ /dev/null
@@ -1,47 +0,0 @@
-ArangoSearch Scorers
-====================
-
-ArangoSearch Scorers are special functions that allow sorting documents from a
-view by their score with regard to the analyzed fields.
-
-Details about their usage in AQL can be found in the
-[ArangoSearch `SORT` section](../../../AQL/Views/ArangoSearch/index.html#arangosearch-sorting).
-
-- BM25: order results based on the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25)
-
-- TFIDF: order results based on the [TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF)
-
-### `BM25()` - Best Matching 25 Algorithm
-
-IResearch provides a 'bm25' scorer implementing the
-[BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25). Optionally, the free
-parameters **k** and **b** of the algorithm, typically used for advanced
-optimization, can be specified as floating point numbers.
-
-`BM25(doc, k, b)`
-
-- *doc* (document): must be emitted by `FOR doc IN someView`
-
-- *k* (number, _optional_): term frequency, the default is _1.2_. *k*
- calibrates the text term frequency scaling. A *k* value of *0* corresponds to
- a binary model (no term frequency), and a large value corresponds to using raw
- term frequency.
- -- *b* (number, _optional_): determines the scaling by the total text length, the - default is _0.75_. *b* determines the scaling by the total text length. - - b = 1 corresponds to fully scaling the term weight by the total text length - - b = 0 corresponds to no length normalization. - -At the extreme values of the coefficient *b*, BM25 turns into the ranking -functions known as BM11 (for b = 1) and BM15 (for b = 0). - -### `TFIDF()` - Term Frequency – Inverse Document Frequency Algorithm - -Sorts documents using the -[**term frequency–inverse document frequency** algorithm](https://en.wikipedia.org/wiki/TF-IDF). - -`TFIDF(doc, withNorms)` - -- *doc* (document): must be emitted by `FOR doc IN someView` -- *withNorms* (bool, _optional_): specifying whether norms should be used via - **with-norms**, the default is _false_ diff --git a/Documentation/Books/Manual/book.json b/Documentation/Books/Manual/book.json deleted file mode 100644 index f99551f80a8e..000000000000 --- a/Documentation/Books/Manual/book.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "gitbook": "^3.2.2", - "title": "ArangoDB VERSION_NUMBER Documentation", - "version": "VERSION_NUMBER", - "author": "ArangoDB GmbH", - "description": "Official manual for ArangoDB - the native multi-model NoSQL database", - "language": "en", - "plugins": [ - "-search", - "-lunr", - "-sharing", - "toggle-chapters", - "addcssjs", - "anchorjs", - "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git", - "ga", - "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git", - "edit-link", - "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git", - "localized-footer" - ], - "pdf": { - "fontSize": 12, - "toc": true, - "margin": { - "right": 60, - "left": 60, - "top": 35, - "bottom": 35 - } - }, - "styles": { - "website": "styles/website.css" - }, - "pluginsConfig": { - "addcssjs": { - "js": ["styles/header.js", "styles/hs.js"], - "css": ["styles/header.css"] - }, - "sitemap-general": { - "prefix": "https://docs.arangodb.com/devel/Manual/", - "changefreq": "@GCHANGE_FREQ@", - "priority": @GPRIORITY@ - }, - "ga": { - "token": "UA-81053435-2" - }, - "edit-link": { - "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/Manual", - "label": "Edit Page" - }, - "localized-footer": { - "filename": "FOOTER.html" - } - } -} diff --git a/Documentation/Books/Manual/styles/header.css b/Documentation/Books/Manual/styles/header.css deleted file mode 100644 index 4ec87c77b0e5..000000000000 --- a/Documentation/Books/Manual/styles/header.css +++ /dev/null @@ -1,305 +0,0 @@ -/* Design fix because of the header */ -@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700); - -body { - overflow: hidden; - font-family: Roboto, Helvetica, sans-serif; - background: #444444; -} - -.book .book-header h1 a, .book .book-header h1 a:hover { - display: none; -} - -/* GOOGLE START */ - -.google-search #gsc-iw-id1{ - border: none !important; -} - -.google-search .gsst_b { - position: relative; - top: 10px; - left: -25px; - width: 1px; -} - -.gsst_a .gscb_a { - color: #c01a07 !important; -} - -.google-search input { - background-color: #fff !important; - font-family: Roboto, Helvetica, sans-serif; - font-size: 10pt !important; - padding-left: 5px !important; - float: right; - position: relative; - top: 8px; - width: 100% !important; - height: 30px !important; -} - -.google-search input:active { -} - -.google-search { - margin-right: 10px; - margin-left: 10px !important; - float: right 
!important; -} - -.google-search td, -.google-search table, -.google-search tr, -.google-search th { - background-color: #444444 !important; -} - -.google-search .gsc-input-box, -.google-search .gsc-input-box input { - border-radius: 3px !important; - width: 200px; -} - -.gsc-branding-text, -.gsc-branding-img, -.gsc-user-defined-text { - display: none !important; -} - -.google-search .gsc-input-box input { - font-size: 16px !important; -} - -.google-search .gsc-search-button { - display: none !important; -} - -.google-search .gsc-control-cse { - padding: 10px !important; -} - -.google-search > div { - float: left !important; - width: 200px !important; -} - -/* GOOGLE END */ - -.book-summary, -.book-body { - margin-top: 48px; -} - -.arangodb-logo, .arangodb-logo-small { - display: inline; - float: left; - padding-top: 12px; - margin-left: 10px; -} - -.arangodb-logo img { - height: 23px; -} - -.arangodb-logo-small { - display: none; -} - -.arangodb-version-switcher { - width: 65px; - height: 44px; - margin-left: 16px; - float: left; - display: inline; - font-weight: bold; - color: #fff; - background-color: inherit; - border: 0; -} - -.arangodb-version-switcher option { - background-color: white; - color: black; -} - - -.arangodb-header { - position: fixed; - width: 100%; - height: 48px; - z-index: 1; -} - -.arangodb-header .socialIcons-googlegroups a img { - position: relative; - height: 14px; - top: 3px; -} - -.arangodb-navmenu { - display: block; - float: right; - margin: 0; - padding: 0; -} - -.arangodb-navmenu li { - display: block; - float: left; -} - -.arangodb-navmenu li a { - display: block; - float: left; - padding: 0 10px; - line-height: 48px; - font-size: 16px; - font-weight: 400; - color: #fff; - text-decoration: none; - font-family: Roboto, Helvetica, sans-serif; -} - -.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover { - background-color: #88A049 !important; -} - -.downloadIcon { - margin-right: 10px; -} - -/** simple responsive updates **/ - -@media screen and (max-width: 1000px) { - .arangodb-navmenu li a { - padding: 0 6px; - } - - .arangodb-logo { - margin-left: 10px; - } - - .google-search { - margin-right: 5px !important; - } - - .downloadIcon { - margin-right: 0; - } - - .socialIcons { - display: none !important; - } -} - - -@media screen and (max-width: 800px) { - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 130px !important; - } - - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-logo { - display: none; - } - - .arangodb-logo-small { - display: inline; - margin-left: 10px; - } - - .arangodb-logo-small img { - height: 20px; - } - - .arangodb-version-switcher { - margin: 0; - } - -} - -@media screen and (max-width: 600px) { - .arangodb-navmenu li a { - font-size: 15px; - padding: 0 7px; - } - - .arangodb-version-switcher, - .downloadIcon { - display: none !important; - } - - .google-search, - .google-search .gsc-input-box, - .google-search .gsc-input-box input { - width: 24px !important; - } - - .google-search .gsc-input-box input[style] { - background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important; - } - - .google-search .gsc-input-box input:focus { - width: 200px !important; - position: relative; - left: -176px; - background-position: -9999px -9999px !important; - } - -} - -@media screen and (max-width: 400px) { - .arangodb-navmenu li a { - font-size: 13px; - padding: 0 5px; - } - .google-search { - display: 
none; - } -} - -/*Hubspot Cookie notice */ - -body div#hs-eu-cookie-confirmation { - bottom: 0; - top: auto; - position: fixed; - text-align: center !important; -} - -body div#hs-eu-cookie-confirmation.can-use-gradients { - background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75)); -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner { - display: inline-block; - padding: 15px 18px 0; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area { - float: left; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button { - background-color: #577138 !important; - border: none !important; - text-shadow: none !important; - box-shadow: none; - padding: 5px 15px !important; - margin-left: 10px; -} - -body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p { - float: left; - color: #000 !important; - text-shadow: none; -} diff --git a/Documentation/Books/Manual/styles/header.js b/Documentation/Books/Manual/styles/header.js deleted file mode 100644 index a1983da5652d..000000000000 --- a/Documentation/Books/Manual/styles/header.js +++ /dev/null @@ -1,160 +0,0 @@ -// Try to set the version number early, jQuery not available yet -var searcheable_versions = [@BROWSEABLE_VERSIONS@]; -var cx = '@GSEARCH_ID@'; -document.addEventListener("DOMContentLoaded", function(event) { - if (!gitbook.state.root) return; - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = document.getElementsByClassName("arangodb-version-switcher")[0]; - if (bookVersion) { - switcher.value = bookVersion[1]; - } else { - switcher.style.display = "none"; - } -}); - -window.onload = function(){ -window.localStorage.removeItem(":keyword"); - -$(document).ready(function() { - -function appendHeader() { - var VERSION_SELECTOR = "" - var i = 0; - var prefix; - for (i = 0; i < searcheable_versions.length; i++ ) { - if (searcheable_versions[i] === 'devel') { - prefix = ''; - } else { - prefix = 'v'; - } - VERSION_SELECTOR += '\n'; - } - - var div = document.createElement('div'); - div.innerHTML = '
\n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n' + - ' \n' + - ' \n' + - ' \n' + - '
\n'; - - $('.book').before(div.innerHTML); - - }; - - - function rerenderNavbar() { - $('.arangodb-header').remove(); - appendHeader(); - }; - - //render header - rerenderNavbar(); - function addGoogleSrc() { - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//cse.google.com/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - }; - addGoogleSrc(); - - $(".arangodb-navmenu a[data-book]").on("click", function(e) { - e.preventDefault(); - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - urlSplit.pop(); // e.g. "Manual" - window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html"; - }); - - // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*) - var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//); - var switcher = $(".arangodb-version-switcher"); - if (bookVersion) { - switcher.val(bookVersion[1]); - } else { - switcher.hide(); - } - - $(".arangodb-version-switcher").on("change", function(e) { - var urlSplit = gitbook.state.root.split("/"); - urlSplit.pop(); // "" - var currentBook = urlSplit.pop(); // e.g. "Manual" - urlSplit.pop() // e.g. "3.0" - if (e.target.value == "2.8") { - var legacyMap = { - "Manual": "", - "AQL": "/Aql", - "HTTP": "/HttpApi", - "Cookbook": "/Cookbook" - }; - currentBook = legacyMap[currentBook]; - } else { - currentBook = "/" + currentBook; - } - window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html"; - }); - -}); - -}; diff --git a/Documentation/Books/Manual/styles/hs.js b/Documentation/Books/Manual/styles/hs.js deleted file mode 100644 index 9a8ae18a61d2..000000000000 --- a/Documentation/Books/Manual/styles/hs.js +++ /dev/null @@ -1,33 +0,0 @@ -// HubSpot Script Loader. Please do not block this resource. 
See more: http://hubs.ly/H0702_H0 - -(function (id, src, attrs) { - if (document.getElementById(id)) { - try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); } - finally { return; } - } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } } - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"}); - -(function (id, src) { - if (document.getElementById(id)) { return; } - var js = document.createElement('script'); - js.src = src; - js.type = 'text/javascript'; - js.id = id; - var e = document.getElementsByTagName('script')[0]; - e.parentNode.insertBefore(js, e); -})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js'); - -window.setTimeout(function () { - $('body').on('click', 'a', function () { - var _hsq = window._hsq = window._hsq || []; - _hsq.push(['setPath', window.location.pathname]); - _hsq.push(['trackPageView']); - }); -}, 1000); diff --git a/Documentation/Books/Manual/styles/website.css b/Documentation/Books/Manual/styles/website.css deleted file mode 100644 index 1c44cc002360..000000000000 --- a/Documentation/Books/Manual/styles/website.css +++ /dev/null @@ -1,96 +0,0 @@ -.markdown-section small { - font-size: 80%; -} -.markdown-section sub, .markdown-section sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -.markdown-section sup { - top: -.5em; -} -.markdown-section sub { - bottom: -.25em; -} - -div.example_show_button { - border: medium solid lightgray; - text-align: center; - position: relative; - top: -10px; - display: flex; - justify-content: center; -} - -.book .book-body .navigation.navigation-next { - right: 10px !important; -} - -.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover { - color: #fff !important; - background: #80A54D !important; - text-decoration: none; -} - -.book .book-body .page-wrapper .page-inner section.normal .deprecated{ - background-color: rgba(240,240,0,0.4); -} - -.book .book-body section > ul li:last-child { - margin-bottom: 0.85em; -} - -.book .book-body .alert p:last-child { - margin-bottom: 0; -} - -.columns-3 { - -webkit-column-count: 3; - -moz-column-count: 3; - -ms-column-count: 3; - -o-column-count: 3; - column-count: 3; - columns: 3; -} - -.book .book-body .program-options code { - background-color: #f0f0f0; -} - -.book .book-body .program-options td { - vertical-align: top; -} - -.book .book-body .program-options td:first-child { - min-width: 250px; -} - -.localized-footer { - opacity: 0.5; -} - -.example-container { - position: relative; -} - -.example-container a.anchorjs-link { - position: absolute; - top: 10px; - right: 10px; - font: 1em/1 anchorjs-icons; -} - -.gsib_a { -padding: 0px !important; -} - -.gsc-control-cse { -border: 0px !important; -background-color: transparent !important; -} - - -.gsc-input { -margin: 0px !important; -} diff --git a/Documentation/Books/README.md b/Documentation/Books/README.md deleted file mode 100644 index 315c123a3079..000000000000 --- a/Documentation/Books/README.md +++ /dev/null @@ -1,5 +0,0 @@ -ArangoDB Documentation -====================== -This folder contains the source for the documentation on 
[docs.arangodb.com](https://docs.arangodb.com). -However, its heavily preprocessed, and the files contain several extensions to markup. -See [README_maintainers.md](../../README_maintainers.md) for more details. diff --git a/Documentation/Books/SummaryBlacklist.txt b/Documentation/Books/SummaryBlacklist.txt deleted file mode 100644 index 981d73fb3f18..000000000000 --- a/Documentation/Books/SummaryBlacklist.txt +++ /dev/null @@ -1,5 +0,0 @@ -^SUMMARY.md$ -^node_modules/gitbook-plugin-piwik/README.md$ -^node_modules/gitbook-plugin-toggle-chapters/README.md$ -^node_modules/gitbook-plugin-addcssjs/README.md$ -^node_modules/gitbook-plugin-expandable-chapters/README.md$ diff --git a/Documentation/Books/build.sh b/Documentation/Books/build.sh deleted file mode 100755 index 5982f9d12f75..000000000000 --- a/Documentation/Books/build.sh +++ /dev/null @@ -1,870 +0,0 @@ -#!/bin/bash -ALLBOOKS="HTTP AQL Manual Cookbook Drivers" -OTHER_MIME="pdf epub mobi" - -# shellcheck disable=SC2016 -TRIPPLETICS='```' -declare -A COLORS -declare -A C -COLORS[RESET]='\033[0m' -# Regular Colors -#COLORS[Black]='\033[0;30m' # Black -# shellcheck disable=SC2154 -COLORS[Red]='\033[0;31m' # Red -# shellcheck disable=SC2154 -COLORS[Green]='\033[0;32m' # Green -# shellcheck disable=SC2154 -COLORS[Yellow]='\033[0;33m' # Yellow -#COLORS[Blue]='\033[0;34m' # Blue -#COLORS[Purple]='\033[0;35m' # Purple -#COLORS[Cyan]='\033[0;36m' # Cyan -# shellcheck disable=SC2154 -COLORS[White]='\033[0;37m' # White - -for i in "${!COLORS[@]}"; do - # shellcheck disable=SC2086 - C[${i}]=$(echo -e ${COLORS[$i]}) -done - -# shellcheck disable=SC2034 -WRN_COLOR="${C[Yellow]}" -ERR_COLOR="${C[Red]}" -STD_COLOR="${C[White]}" -OK_COLOR="${C[Green]}" -RESET="${C[RESET]}" - -newVersionNumber=$( tr -d '\r\n' < ../../VERSION) - -isCygwin=0 -if test "$(uname -o||true)" == "Cygwin"; then - isCygwin=1 -fi - -declare -A ALL_GSEARCH_ID -for book in ${ALLBOOKS}; do - ALL_GSEARCH_ID[$book]=$( grep "GSEARCH_ID_${book}" ../../VERSIONS |sed 's;.*"\([0-9a-zA-Z:_-]*\)".*;\1;') -done - - -GCHANGE_FREQ=$(grep "GCHANGE_FREQ" ../../VERSIONS |sed 's;.*"\([0-9a-zA-Z:]*\)".*;\1;') -GPRIORITY=$(grep "GPRIORITY" ../../VERSIONS |sed 's;.*"\([0-9a-zA-Z.]*\)".*;\1;') -BROWSEABLE_VERSIONS=$(grep "BROWSEABLE_VERSIONS" ../../VERSIONS |sed -e 's;" *$;;' -e 's;.*";;') - -function start_X11_display() -{ - PIDFILE="$1" - if test -f "${PIDFILE}"; then - stop_X11_display "${PIDFILE}" - fi - /usr/bin/daemon "--pidfile=${PIDFILE}" --name=xvfb --inherit --output=/tmp/xvfb.log -- Xvfb "${DISPLAY}" -screen 0 800x600x16 -ac -pn -noreset -} - -function stop_X11_display() -{ - PIDFILE=$1 - kill "$(cat "${PIDFILE}")" - rm -f "${PIDFILE}" -} - -################################################################################ -# per book targets -function check-summary() -{ - NAME="$1" - echo "${STD_COLOR}##### checking summary for ${NAME}${RESET}" - find "ppbooks/${NAME}" -name \*.md | \ - sed -e "s;ppbooks/${NAME}/;;" | \ - grep -vf SummaryBlacklist.txt | \ - grep -v gitbook-plugin | \ - grep -v node_modules/ | \ - sort > /tmp/is_md.txt - - grep -v '^ *# '< "${NAME}/SUMMARY.md" | \ - grep '(' |sed -e "s;.*(;;" -e "s;).*;;" | \ - sort > /tmp/is_summary.txt - - if test "$(comm -3 /tmp/is_md.txt /tmp/is_summary.txt|wc -l)" -ne 0; then - echo "${ERR_COLOR}" - echo "not all files of ${NAME} are mapped to the summary!" 
- echo " files found | files in summary" - comm -3 /tmp/is_md.txt /tmp/is_summary.txt - echo "${RESET}" - exit 1 - fi -} - -function book-check-leftover-docublocks() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for left over docublocks in ${NAME}${RESET}" - ERRORS=$(grep -rl "startDocuBlock" --include "*.md" "ppbooks/${NAME}" | sed -e "s/^/- /g") - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "startDocuBlock markers still found in generated output files:" - echo "${ERRORS}" - echo "${RESET}" - exit 1 - fi -} - -function book-check-restheader-leftovers() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for restheader leftovers in ${NAME}${RESET}" - ERRORS=$(find "ppbooks/${NAME}" -not \( -path "ppbooks/Drivers/SpringData/*" -prune \) -name "*.md" -exec grep -- '^@[A-Z]*' {} \; -print) - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "found these unconverted Swagger Restapi tags: " - echo "${ERRORS}" - echo "${RESET}" - exit 1 - fi -} - -function ppbook-precheck-bad-code-sections() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for bad code sections in ${NAME}${RESET}" - if grep -qR "^${TRIPPLETICS} *.* " "${NAME}"; then - echo "${ERR_COLOR}" - echo "tripple tics with blanks afterwards found: " - grep -R "^${TRIPPLETICS} *.* " "${NAME}" - echo "${RESET}" - exit 1 - fi -} - -function ppbook-precheck-bad-headings() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for headers that won't proper display on github in ${NAME}${RESET}" - if grep -qRI '^##*[a-zA-Z]' "${NAME}"; then - echo "${ERR_COLOR}" - echo "Headlines broken on github found: " - grep -RI '^##*[a-zA-Z]' "${NAME}" - echo "${RESET}" - exit 1 - fi -} - -function ppbook-check-html-link() -{ - NAME="$1" - MSG="$2" - echo "${STD_COLOR}##### checking for invalid HTML links in ${NAME}${RESET}" - echo "${ALLBOOKS}" | tr " " "\n" | sed -e 's;^;/;' -e 's;$;/;' > /tmp/books.regex - - set +e - grep -r -E '\[.*\]\(.*\)' "ppbooks/${NAME}"| \ - grep '\.md:'| grep 'html'| \ - grep -v 'http://' | \ - grep -v 'https://' | \ - grep -v 'header.css' | \ - grep -v -f /tmp/books.regex > /tmp/relative_html_links.txt - set -e - - if test "$(wc -l < /tmp/relative_html_links.txt)" -gt 0; then - echo "${ERR_COLOR}" - echo "Found links to .html files inside of the document! use .md instead!" - echo "${MSG}" - cat /tmp/relative_html_links.txt - echo "${RESET}" - exit 1 - fi -} - -function ppbook-check-directory-link() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for invalid md links in ${NAME}${RESET}" - set +e - ERRORS=$(grep -r -E '\[.*\]\(.*\)' "ppbooks/${NAME}" | \ - grep '\.md:' | \ - grep -v html | \ - grep -v http://| \ - grep -v https:// | \ - grep -v header.css | \ - grep -v node_modules | \ - grep -v node_modules | \ - grep -v '\.md') - set -e - nERRORS=$(echo -n "${ERRORS}" | wc -l) - if test "$nERRORS" -gt 0; then - echo "${ERR_COLOR}" - echo "Found director links! use ..//README.md instead!" 
- echo "${ERRORS}" - echo "${RESET}" - exit 1 - fi -} - -function book-check-markdown-leftovers() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for remaining markdown snippets in the HTML output of ${NAME}${RESET}" - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep -- '^##' {} \; -print) - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}"; - echo "found these unconverted markdown titles: " - echo "${ERRORS}" - echo "${RESET}"; - exit 1; - fi - - set +e - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep -- '&gt;' {} \; -print) - set -e - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "found these double converted > signs: " - echo "${ERRORS}" - echo "${RESET}" - exit 1; - fi - - set +e - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep '"[a-zA-Z/\.]*\.md\"[ />]' {} \; -print | grep -v data-filepath) - set -e - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "${ERRORS}" - echo "found dangling markdown links; see the list above " - echo "${RESET}" - exit 1 - fi - - set +e - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep '"[a-zA-Z/\.]*\.md#' {} \; -print) - set -e - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "found dangling markdown links: " - echo "${ERRORS}" - echo "${RESET}" - exit 1 - fi - - set +e - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep "${TRIPPLETICS}" {} \; -print) - set -e - if test "$(echo -n "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}" - echo "found dangling markdown code sections: " - echo "${ERRORS}" - echo "${RESET}" - exit 1 - fi - - set +e - ERRORS=$(find "books/${NAME}" -name '*.html' -exec grep '\] "/tmp/tags/${dir}/${fn}" - done - - fail=0 - rm -f /tmp/failduplicatetags.txt - find /tmp/tags -type f | while IFS= read -r htmlf; do - (sort "${htmlf}" |grep -v ^$ > /tmp/sorted.txt) || true - (sort -u "${htmlf}" |grep -v ^$ > /tmp/sortedunique.txt) || true - if test "$(comm -3 /tmp/sorted.txt /tmp/sortedunique.txt|wc -l)" -ne 0; then - echo "${ERR_COLOR}" - echo "in ${htmlf}: " - comm -3 /tmp/sorted.txt /tmp/sortedunique.txt - echo "${RESET}" - touch /tmp/failduplicatetags.txt - fi - done - - rm -f /tmp/sorted.txt /tmp/sortedunique.txt - if test -f /tmp/failduplicatetags.txt; then - echo "${ERR_COLOR}" - echo "duplicate anchors detected - see above" - echo "${RESET}" - rm -f /tmp/failduplicatetags.txt - exit 1 - fi - - rm -f /tmp/anchorlist.txt - - echo "${STD_COLOR}##### fetching anchors from generated http files${RESET}" - for file in $(find books -name \*.html); do - # - strip of the menu - # - then the page tail. 
- # - remove links to external pages - cat $file | \ - sed -r -n -e '/normal markdown-section/,${p}'| \ - sed -r -n -e '/.*id="page-footer".*/q;p' | \ - grep ' /tmp/thisdoc.txt - # Links with anchors: - cat /tmp/thisdoc.txt |grep '#' | sed "s;\(.*\)#\(.*\);${file},\1,\2;" >> /tmp/anchorlist.txt - # links without anchors: - cat /tmp/thisdoc.txt |grep -v '#' | sed "s;\(.*\);${file},\1,;" >> /tmp/anchorlist.txt - - done - - echo "${STD_COLOR}##### cross checking anchors${RESET}" - NO=0 - echo "${NO}" > /tmp/anchorlistcount.txt - # shellcheck disable=SC2002 - cat /tmp/anchorlist.txt | while IFS= read -r i; do - ANCHOR=$(echo "$i" | cut '-d,' -f 3) - FN=$(echo "$i" | cut '-d,' -f 2) - SFN=$(echo "$i" | cut '-d,' -f 1) - - if test -z "$FN"; then - FN="$SFN" - else - SFNP=$(sed 's;/[a-zA-Z0-9.-]*.html;;' <<< "$SFN") - FN="${SFNP}/${FN}" - fi - if test -d "$FN"; then - FN="${FN}index.html" - fi - if test ! -f "/tmp/tags/${FN}"; then - echo "${ERR_COLOR}" - echo "File referenced by ${i} doesn't exist." - NO=$((NO + 1)) - echo "${RESET}" - else - if test -n "$ANCHOR"; then - if grep -q "^$ANCHOR$" "/tmp/tags/${FN}"; then - true - else - echo "${ERR_COLOR}" - echo "Anchor not found in $i" - NO=$((NO + 1)) - echo "${RESET}${WRN_COLOR}available anchors in that file:${RESET}${STD_COLOR}" - cat "/tmp/tags/${FN}" |sort - echo "${RESET}" - fi - fi - fi - echo "${NO}" > /tmp/anchorlistcount.txt - done - NO="$(cat /tmp/anchorlistcount.txt)" - if test "${NO}" -gt 0; then - echo "${ERR_COLOR}" - echo "${NO} Dangling anchors found!" - echo "${WRN_COLOR}" - echo "${1}" - echo "${RESET}" - exit 1 - fi - rm -rf /tmp/anchorlist.txt /tmp/tags -} - -function book-check-images-referenced() -{ - NAME="$1" - echo "${STD_COLOR}##### checking for unused image files ${NAME}${RESET}" - ERRORS=$(find "${NAME}" -name '*.png' | while IFS= read -r image; do - baseimage=$(basename "$image") - if ! grep -Rq "${baseimage}" "${NAME}"; then - printf "\n${image}" - fi - done - ) - if test "$(printf "${ERRORS}" | wc -l)" -gt 0; then - echo "${ERR_COLOR}"; - echo "the following images are not referenced by any page: " - echo "${ERRORS}" - echo "${RESET}"; - exit 1; - fi -} - -function build-book-symlinks() -{ - echo "${STD_COLOR}##### generate backwards compatibility symlinks for ${NAME}${RESET}" - cd "books/${NAME}" - pwd - find . -name "README.md" |\ - sed -e 's:README\.md$::' |\ - awk '{print "ln -s index.html " "$1" "README.html"}' |\ - bash -} - -function build-book() -{ - python ../Scripts/codeBlockReader.py || exit 1 - export NAME="$1" - echo "${STD_COLOR}##### Generating book ${NAME}${RESET}" - ppbook-precheck-bad-code-sections "${NAME}" - ppbook-precheck-bad-headings "${NAME}" - - if test ! 
-d "ppbooks/${NAME}"; then - mkdir -p "ppbooks/${NAME}" - WD=$(pwd) - find "${NAME}" -type d | while IFS= read -r dir; do - cd "${WD}/ppbooks" - test -d "${dir}" || mkdir -p "${dir}" - done - fi - if ditaa --help > /dev/null; then - echo "${STD_COLOR} - generating ditaa images${RESET}" - find "${NAME}" -name "*.ditaa" | while IFS= read -r image; do - mkdir -p $(dirname "ppbooks/${image//ditaa/png}") - ditaa "${image}" "ppbooks/${image//ditaa/png}" - done - else - echo "${ERR_COLOR} - generating FAKE ditaa images - no ditaa installed${RESET}" - find "${NAME}" -name "*.ditaa" | while IFS= read -r image; do - mkdir -p $(dirname "ppbooks/${image//ditaa/png}") - cp "../../js/node/node_modules/mocha/images/error.png" \ - "ppbooks/${image//ditaa/png}" - done - fi - echo "${STD_COLOR} - preparing environment${RESET}" - ( - cd "ppbooks/${NAME}" - if ! test -L SUMMARY.md; then - ln -s "../../${NAME}/SUMMARY.md" . - fi - if ! test -f FOOTER.html ; then - cp "../../${NAME}/FOOTER.html" . - fi - ) - - - ( - cd "ppbooks/${NAME}" - mkdir -p styles - cp -a "../../${NAME}/styles/"* styles/ - ) - WD=$(pwd) - - echo "${STD_COLOR} - generating MD-Files${RESET}" - python ../Scripts/generateMdFiles.py \ - "${NAME}" \ - ppbooks/ \ - ../../js/apps/system/_admin/aardvark/APP/api-docs.json \ - "${FILTER}" || exit 1 - - test -d "books/${NAME}" || mkdir -p "books/${NAME}" - - echo "${STD_COLOR} - Checking integrity ${VERSION}${RESET}" - check-summary "${NAME}" - book-check-leftover-docublocks "${NAME}" - book-check-restheader-leftovers "${NAME}" - ppbook-check-two-links "${NAME}" - ppbook-check-directory-link "${NAME}" - book-check-images-referenced "${NAME}" - - if echo "${newVersionNumber}" | grep -q devel; then - VERSION="${newVersionNumber} $(date +' %d. %b %Y ')" - RELEASE_DIRECTORY=devel - else - VERSION="${newVersionNumber}" - RELEASE_DIRECTORY=$(sed "s;\\.[0-9]*$;;" <<< "${newVersionNumber}") - fi - export VERSION - - if ! 
test -f "ppbooks/${NAME}/book.json" ; then - cp "${NAME}/book.json" "ppbooks/${NAME}" - fi - - for facilityfile in book.json styles/header.js README.md; do - export facilityfile - export RELEASE_DIRECTORY - ( - cd "ppbooks/${NAME}" - sed -e "s/VERSION_NUMBER/v${VERSION}/g" \ - -e "s;/devel;/${RELEASE_DIRECTORY};" \ - -e "s;@GSEARCH_ID@;${ALL_GSEARCH_ID[${NAME}]};" \ - -e "s;@GCHANGE_FREQ@;${GCHANGE_FREQ};" \ - -e "s;@GPRIORITY@;${GPRIORITY};" \ - -e "s;@BROWSEABLE_VERSIONS@;${BROWSEABLE_VERSIONS};" \ - \ - -i "${facilityfile}" - ) - done - - echo "${STD_COLOR} - Building Version ${VERSION}${RESET}" - - if test -d "${NODE_MODULES_DIR}"; then - echo "${STD_COLOR}#### Installing plugins from ${NODE_MODULES_DIR}${RESET}" - cp -a "${NODE_MODULES_DIR}" "ppbooks/${NAME}" - else - echo "${STD_COLOR}#### Downloading plugins from ${NODE_MODULES_DIR}${RESET}" - (cd "ppbooks/${NAME}"; gitbook install -g) - fi - echo "${STD_COLOR} - Building Book ${NAME} ${RESET}" - (cd "ppbooks/${NAME}" && gitbook "${GITBOOK_ARGS[@]}" build "./" "./../../books/${NAME}") - rm -f "./books/${NAME}/FOOTER.html" - echo "${STD_COLOR} - deleting markdown files in output (gitbook 3.x bug)" - find "./books/${NAME}/" -type f -name "*.md" -delete - - book-check-markdown-leftovers "${NAME}" -} - -function build-book-dist() -{ - NAME="$1" - export DISPLAY="$2" - cd "ppbooks/${NAME}" - for ext in ${OTHER_MIME}; do - OUTPUT="${OUTPUT_DIR}/ArangoDB_${NAME}_${newVersionNumber}.${ext}" - if gitbook "${GITBOOK_ARGS[@]}" "${ext}" ./ "${OUTPUT}"; then - echo "success building ${OUTPUT}" - else - exit 1 - fi - done -} - -function clean-book() -{ - NAME="$1" - rm -rf "books/${NAME}" - if test -z "${FILTER}"; then - rm -rf "ppbooks/${NAME}" - fi -} - -function clean-book-intermediate() -{ - NAME="$1" - if test -z "${FILTER}"; then - rm -rf "ppbooks/${NAME}" - fi - -} - -################################################################################ -# Global targets - - -#************************************************************ -# Check docublocks - checks whether docublock are -# - files in intermediate output directories and temporary -# files are excludes (with # in their names) -# - unique in the source -# - all docublocks are used somewhere in the documentation -# -function check-docublocks() -{ - grep -R '@startDocuBlock' --include "*.h" --include "*.cpp" --include "*.js" --include "*.md" . |\ - grep -v '@startDocuBlockInline' |\ - grep -v stash |\ - grep -v ppbook |\ - grep -v allComments.txt |\ - grep -v Makefile |\ - grep -v '.*~:.*' |\ - grep -v '.*#.*:.*' \ - > /tmp/rawindoc.txt - - grep -R '@startDocuBlockInline' --include "*.h" --include "*.cpp" --include "*.js" --include "*.md" . |\ - grep -v ppbook |\ - grep -v stash |\ - grep -v allComments.txt |\ - grep -v Makefile |\ - grep -v '.*~:.*' |\ - grep -v '.*#.*:.*' \ - >> /tmp/rawindoc.txt - - sed -e "s;\r$;;" -e "s;.*ck ;;" -e "s;.*ne ;;" < /tmp/rawindoc.txt |sort -u > /tmp/indoc.txt - - set +e - grep -R '^@startDocuBlock' ../DocuBlocks --include "*.md" |grep -v aardvark > /tmp/rawinprog.txt - # searching the Inline docublocks needs some more blacklisting: - grep -R '@startDocuBlockInline' --include "*.h" --include "*.cpp" --include "*.js" --include "*.md" . |\ - grep -v ppbook |\ - grep -v stash |\ - grep -v allComments.txt |\ - grep -v build.sh |\ - grep -v '.*~:.*' |\ - grep -v '.*#.*:.*' \ - >> /tmp/rawinprog.txt - - # These files are converted to docublocks on the fly and only live in memory. 
- for file in ../Examples/*.json ; do - echo "$file" |sed -e "s;.*/;Generated: @startDocuBlock program_options_;" -e "s;.json;;" >> /tmp/rawinprog.txt - done - set -e - echo "Generated: startDocuBlockInline errorCodes">> /tmp/rawinprog.txt - - sed -e "s;\r$;;" -e "s;.*ck ;;" -e "s;.*ne ;;" < /tmp/rawinprog.txt |sort > /tmp/inprog_raw.txt - sort -u < /tmp/inprog_raw.txt > /tmp/inprog.txt - - if test "$(wc -l < /tmp/inprog.txt)" -ne "$(wc -l < /tmp/inprog_raw.txt)"; then - echo "${ERR_COLOR}" - echo "Duplicate entry found in the source trees:" - comm -3 /tmp/inprog_raw.txt /tmp/inprog.txt - echo "${WRN_COLOR}" - echo "${1}" - echo "${RESET}" - exit 1 - fi - - if test "$(comm -3 /tmp/indoc.txt /tmp/inprog.txt |wc -l)" -ne 0; then - echo "${ERR_COLOR}" - echo "Not all blocks were found on both sides:" - echo "Documentation | Programcode:" - comm -3 /tmp/indoc.txt /tmp/inprog.txt - if test "$(comm -2 -3 /tmp/indoc.txt /tmp/inprog.txt |wc -l)" -gt 0; then - echo "Documentation: " - for grepit in $(comm -2 -3 /tmp/indoc.txt /tmp/inprog.txt); do - grep "$grepit" /tmp/rawindoc.txt - done - fi - if test "$(comm -1 -3 /tmp/indoc.txt /tmp/inprog.txt |wc -l)" -gt 0; then - echo "Program code:" - for grepit in $(comm -1 -3 /tmp/indoc.txt /tmp/inprog.txt); do - grep "$grepit" /tmp/rawinprog.txt | sed "s;/// @startDocuBlock;\t\t;" - done - fi - echo "${WRN_COLOR}" - echo "${1}" - echo "${RESET}" - exit 1 - fi -} - -function clean-intermediate() -{ - NAME=$1 - FILTER=$2 - clean-book-intermediate "${NAME}" "${FILTER}" -} - -function clean() -{ - NAME=$1 - clean-intermediate "${NAME}" - rm -f allComments.txt -} - -function build-book-keep-md() -{ - NAME="$1" - test -d books || mkdir books - build-book "${NAME}" -} - -function build-books() -{ - rm -rf /tmp/tags - set -e - for book in ${ALLBOOKS}; do - clean-intermediate "${book}" - done - - for book in ${ALLBOOKS}; do - build-book-keep-md "${book}" - done - - for book in ${ALLBOOKS}; do - ppbook-check-html-link "${book}" "" - done - - check-docublocks "" - echo "${STD_COLOR}##### Generating redirect index.html${RESET}"; \ - echo '' > books/index.html - check-dangling-anchors "" -} - -function build-dist-books() -{ - set -e - if test -z "${OUTPUT_DIR}"; then - echo "please specify --outputDir" - exit 1 - fi - rm -rf books ppbooks - PIDFILE=/tmp/xvfb_20_0.pid - if test "${isCygwin}" -eq 0 -a -z "${DISPLAY}"; then - DISPLAY=:20.0 - start_X11_display "${PIDFILE}" "${DISP}" - trap 'stop_X11_display "${PIDFILE}"' 0 - fi - export DISPLAY - - WD=$(pwd) - build-books - mkdir -p "${OUTPUT_DIR}" - ( - mv books "ArangoDB-${newVersionNumber}" - pwd - tar -czf "${OUTPUT_DIR}/ArangoDB-${newVersionNumber}.tar.gz" "ArangoDB-${newVersionNumber}" - mv "ArangoDB-${newVersionNumber}" books - ) - for book in $ALLBOOKS; do - cd "$WD"; build-book-dist "${book}" - done -} - -function printHelp() -{ - cat < -`-h` - -Prints a list of the most common options available and then exists. -In order to see all options use `--help-all`. - -`--log level` - -Allows the user to choose the level of information which is logged by -the server. The "level" is specified as a string and can be one of -the following values: fatal, error, warning, info, debug or trace. For -more information see [here](../Administration/Configuration/Logging.md). - - - -@startDocuBlock server_authentication - -`--daemon` - -Runs the server as a "daemon" (as a background process). 
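For illustration only, a minimal sketch of how the options above might be combined when
starting the server from a shell. The PID file path is just a placeholder, and it is
assumed here that a `--pid-file` option accompanies `--daemon`; the log level is whatever
your deployment needs:

    unix> arangod --log info --daemon --pid-file /var/run/arangod.pid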
- diff --git a/Documentation/Books/stash/ClusterArchitecture.md b/Documentation/Books/stash/ClusterArchitecture.md deleted file mode 100644 index dcdec1987b08..000000000000 --- a/Documentation/Books/stash/ClusterArchitecture.md +++ /dev/null @@ -1,39 +0,0 @@ - -Apache Mesos integration ------------------------- - -For the distributed setup, we use the Apache Mesos infrastructure by default. - -ArangoDB is a fully certified package for DC/OS and can thus -be deployed essentially with a few mouse clicks or a single command, once -you have an existing DC/OS cluster. But even on a plain Apache Mesos cluster -one can deploy ArangoDB via Marathon with a single API call and some JSON -configuration. - -The advantage of this approach is that we can not only implement the -initial deployment, but also the later management of automatic -replacement of failed instances and the scaling of the ArangoDB cluster -(triggered manually or even automatically). Since all manipulations are -either via the graphical web UI or via JSON/REST calls, one can even -implement auto-scaling very easily. - -A DC/OS cluster is a very natural environment to deploy microservice -architectures, since it is so convenient to deploy various services, -including potentially multiple ArangoDB cluster instances within the -same DC/OS cluster. The built-in service discovery makes it extremely -simple to connect the various microservices and Mesos automatically -takes care of the distribution and deployment of the various tasks. - -See the [Deployment](../../../Deployment/README.md) chapter and its subsections -for instructions. - -It is possible to deploy an ArangoDB cluster by simply launching a bunch of -Docker containers with the right command line options to link them up, -or even on a single machine starting multiple ArangoDB processes. In that -case, synchronous replication will work within the deployed ArangoDB cluster, -and automatic fail-over in the sense that the duties of a failed server will -automatically be assigned to another, surviving one. However, since the -ArangoDB cluster cannot within itself launch additional instances, replacement -of failed nodes is not automatic and scaling up and down has to be managed -manually. This is why we do not recommend this setup for production -deployment. diff --git a/Documentation/Books/stash/CollectionsAndDocuments.md b/Documentation/Books/stash/CollectionsAndDocuments.md deleted file mode 100644 index 4260a32f28c5..000000000000 --- a/Documentation/Books/stash/CollectionsAndDocuments.md +++ /dev/null @@ -1,175 +0,0 @@ -Starting the JavaScript shell ------------------------------ - -The easiest way to connect to the database is the JavaScript shell -_arangosh_. You can either start it from the command-line or as an -embedded version in the browser. Using the command-line tool has the -advantage that you can use autocompletion. - - unix> arangosh --server.password "" - _ - __ _ _ __ __ _ _ __ __ _ ___ ___| |__ - / _` | '__/ _` | '_ \ / _` |/ _ \/ __| '_ \ - | (_| | | | (_| | | | | (_| | (_) \__ \ | | | - \__,_|_| \__,_|_| |_|\__, |\___/|___/_| |_| - |___/ - - Welcome to arangosh 2.x.y. Copyright (c) 2012 triAGENS GmbH. - Using Google V8 4.1.0.27 JavaScript engine. - Using READLINE 6.1. 
- - Connected to Arango DB 127.0.0.1:8529 Version 2.2.0 - - arangosh> help - ------------------------------------- Help ------------------------------------- - Predefined objects: - arango: ArangoConnection - db: ArangoDatabase - fm: FoxxManager - Example: - > db._collections(); list all collections - > db._create() create a new collection - > db._drop() drop a collection - > db..toArray() list all documents - > id = db..save({ ... }) save a document - > db..remove(<_id>) delete a document - > db..document(<_id>) retrieve a document - > db..replace(<_id>, {...}) overwrite a document - > db..update(<_id>, {...}) partially update a document - > db..exists(<_id>) check if document exists - > db._query().toArray() execute an AQL query - > db._useDatabase() switch database - > db._createDatabase() create a new database - > db._databases() list existing databases - > help show help pages - > exit - arangosh> - -This gives you a prompt where you can issue JavaScript commands. - -The standard setup does not require a password. Depending on your -setup you might need to specify the endpoint, username and password -in order to run the shell on your system. You can use the options -`--server.endpoint`, `--server.username` and `--server.password` for -this. - - unix> arangosh --server.endpoint tcp://127.0.0.1:8529 --server.username root - -A default configuration is normally installed under -*/etc/arangodb/arangosh.conf*. It contains a default endpoint and an -empty password. - -Querying for Documents ----------------------- - -All documents are stored in collections. All collections are stored in a -database. The database object is accessible via the variable *db*. - -Creating a collection is simple. You can use the *_create* method -of the *db* variable. - - @startDocuBlockInline 01_workWithColl_create - @EXAMPLE_ARANGOSH_OUTPUT{01_workWithColl_create} - ~addIgnoreCollection("example") - db._create("example"); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 01_workWithColl_create - -After the collection has been created you can easily access it using -the path *db.example*. The collection currently shows as *loaded*, -meaning that it is loaded into memory. If you restart the server and -access the collection again it will now show as *unloaded*. You can -also manually unload a collection. - - @startDocuBlockInline 02_workWithColl_unload - @EXAMPLE_ARANGOSH_OUTPUT{02_workWithColl_unload} - db.example.unload(); - db.example; - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 02_workWithColl_unload - -Whenever you use a collection ArangoDB will automatically load it -into memory for you. - -In order to create new documents in a collection use the *save* -operation. - - @startDocuBlockInline 03_workWithColl_save - @EXAMPLE_ARANGOSH_OUTPUT{03_workWithColl_save} - db.example.save({ Hello : "World" }); - db.example.save({ "name" : "John Doe", "age" : 29 }); - db.example.save({ "name" : "Jane Smith", "age" : 31 }); - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 03_workWithColl_save - -Just storing documents would be no fun. We now want to select some of -the stored documents again. In order to select all elements of a -collection, one can use the *toArray* method: - - @startDocuBlockInline 04_workWithColl_directAcess - @EXAMPLE_ARANGOSH_OUTPUT{04_workWithColl_directAcess} - db.example.toArray() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 04_workWithColl_directAcess - -Now we want to look for a person with a given name. We can use -*byExample* for this. 
The method returns an array of documents -matching a given example. - - @startDocuBlockInline 05_workWithColl_byExample - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithColl_byExample} - db.example.byExample({ name: "Jane Smith" }).toArray() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithColl_byExample - -While the *byExample* works very well for simple queries where you -combine the conditions with an `and`. The syntax above becomes messy for *joins* -and *or* conditions. Therefore ArangoDB also supports a full-blown -query language, AQL. To run an AQL query, use the *db._query* method:. - - @startDocuBlockInline 05_workWithColl_AQL_STR - @EXAMPLE_ARANGOSH_OUTPUT{05_workWithColl_AQL_STR} - db._query('FOR user IN example FILTER user.name == "Jane Smith" RETURN user').toArray() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 05_workWithColl_AQL_STR - -Searching for all persons with an age above 30: - - @startDocuBlockInline 06_workWithColl_AOQL_INT - @EXAMPLE_ARANGOSH_OUTPUT{06_workWithColl_AOQL_INT} - db._query('FOR user IN example FILTER user.age > 30 RETURN user').toArray() - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 06_workWithColl_AOQL_INT - - -John was put in there by mistake – so let's delete him again. We fetch -the `_id` using *byExample*: - - @startDocuBlockInline 07_workWithColl_remove - @EXAMPLE_ARANGOSH_OUTPUT{07_workWithColl_remove} - db.example.remove(db.example.byExample({ name: "John Doe" }).toArray()[0]._id) - db.example.toArray() - ~removeIgnoreCollection("example") - ~db._drop("example") - @END_EXAMPLE_ARANGOSH_OUTPUT - @endDocuBlock 07_workWithColl_remove - - -You can learn all about the query language [AQL](../../AQL/index.html). Note that -*_query* is a short-cut for *_createStatement* and *execute*. We will -come back to these functions when we talk about cursors. - -ArangoDB's Front-End --------------------- - -The ArangoDB server has a graphical front-end, which allows you to -inspect the current state of the server from within your browser. You -can use the front-end using the following URL: - - http://localhost:8529/ - -The front-end allows you to browse through the collections and -documents. If you need to administrate the database, please use -the ArangoDB shell described in the next section. - - diff --git a/Documentation/Books/stash/DocumentRevisionMVCC.md b/Documentation/Books/stash/DocumentRevisionMVCC.md deleted file mode 100644 index 29cfbc2eeeed..000000000000 --- a/Documentation/Books/stash/DocumentRevisionMVCC.md +++ /dev/null @@ -1,59 +0,0 @@ -As ArangoDB uses MVCC (Multiple Version Concurrency Control) -internally, documents can exist in more than one revision. -The document revision is the MVCC token used to specify -a particular revision of a document (identified by its `_id`). - - - -It is a string value that contained (up to ArangoDB 3.0) -an integer number and is unique within the list of document -revisions for a single document. -In ArangoDB >= 3.1 the _rev strings -are in fact time stamps. They use the local clock of the DBserver that -actually writes the document and have millisecond accuracy. -A [_Hybrid Logical Clock_](http://www.cse.buffalo.edu/tech-reports/2014-04.pdf) -is used. - -In a single server setup, `_rev` values are unique across all documents -and all collections. In a cluster setup, -within one shard it is guaranteed that two different document revisions -have a different `_rev` string, even if they are written in the same -millisecond, and that these stamps are ascending. 
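For illustration, a minimal arangosh sketch of how the revision changes with every write.
The `example` collection is assumed to exist, and the concrete `_rev` values shown in the
comments are made up:

```js
var doc = db.example.save({ status: "draft" });          // e.g. _rev: "_YOn09b---_"
var upd = db.example.update(doc._key, { status: "final" });
doc._rev === upd._rev;   // false: the update produced a new, later revision
// treat _rev as an opaque string and only compare revisions for equality,
// e.g. to detect whether a document changed since it was last read
```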
- -Note however that different servers in your cluster might have a clock -skew, and therefore between different shards or even between different -collections the time stamps are not guaranteed to be comparable. - -The Hybrid Logical Clock feature does one thing to address this -issue: Whenever a message is sent from some server A in your cluster to -another one B, it is ensured that any timestamp taken on B after the -message has arrived is greater than any timestamp taken on A before the -message was sent. This ensures that if there is some "causality" between -events on different servers, time stamps increase from cause to effect. -A direct consequence of this is that sometimes a server has to take -timestamps that seem to come from the future of its own clock. It will -however still produce ever increasing timestamps. If the clock skew is -small, then your timestamps will relatively accurately describe the time -when the document revision was actually written. - -ArangoDB uses 64bit unsigned integer values to maintain -document revisions internally. At this stage we intentionally do not -document the exact format of the revision values. When returning -document revisions to -clients, ArangoDB will put them into a string to ensure the revision -is not clipped by clients that do not support big integers. Clients -should treat the revision returned by ArangoDB as an opaque string -when they store or use it locally. This will allow ArangoDB to change -the format of revisions later if this should be required (as has happened -with 3.1 with the Hybrid Logical Clock). Clients can -use revisions to perform simple equality/non-equality comparisons -(e.g. to check whether a document has changed or not), but they should -not use revision ids to perform greater/less than comparisons with them -to check if a document revision is older than one another, even if this -might work for some cases. - -Document revisions can be used to conditionally query, update, replace -or delete documents in the database. - -In order to find a particular revision of a document, you need the document -handle or key, and the document revision. diff --git a/Documentation/Books/stash/Durability.md b/Documentation/Books/stash/Durability.md deleted file mode 100644 index fbbebb88d80e..000000000000 --- a/Documentation/Books/stash/Durability.md +++ /dev/null @@ -1,91 +0,0 @@ -Durability Configuration -======================== - -Global Configuration --------------------- - -**Pre-setting on database creation** - -There are global configuration values for durability, which can be adjusted by -specifying the following configuration options: - -@startDocuBlock databaseWaitForSync - - -@startDocuBlock databaseForceSyncProperties - - -@startDocuBlock WalLogfileSyncInterval - -`--rocksdb.sync-interval` - -The interval (in milliseconds) that ArangoDB will use to automatically -synchronize data in RocksDB's write-ahead logs to disk. Automatic syncs will -only be performed for not-yet synchronized data, and only for operations that -have been executed without the *waitForSync* attribute. - -**Adjusting at run-time** - -The total amount of disk storage required by the MMFiles engine is determined by the size of -the write-ahead logfiles plus the sizes of the collection journals and datafiles. 
- -There are the following options for configuring the number and sizes of the write-ahead -logfiles: - - -@startDocuBlock WalLogfileReserveLogfiles - - - -@startDocuBlock WalLogfileHistoricLogfiles - - - -@startDocuBlock WalLogfileSize - - - -@startDocuBlock WalLogfileAllowOversizeEntries - - -When data gets copied from the write-ahead logfiles into the journals or datafiles -of collections, files will be created on the collection level. How big these files -are is determined by the following global configuration value: - - -@startDocuBlock databaseMaximalJournalSize - - - -Per-collection configuration ----------------------------- -**Pre-setting during collection creation** - -You can also configure the durability behavior on a per-collection basis. -Use the ArangoDB shell to change these properties. - - -@startDocuBlock collectionProperties - - -**Adjusting at run-time** - -The journal size can also be adjusted on a per-collection level using the collection's -*properties* method. - - -Per-operation configuration ---------------------------- - -Many data-modification operations and also ArangoDB's transactions allow to specify -a *waitForSync* attribute, which when set ensures the operation data has been -synchronized to disk when the operation returns. - - -Disk-Usage Configuration (MMFiles engine) ------------------------------------------ - -The amount of disk space used by the MMFiles engine is determined by a few configuration -options. - - diff --git a/Documentation/Books/stash/FileSystems.md b/Documentation/Books/stash/FileSystems.md deleted file mode 100644 index a86b4b2f6d7f..000000000000 --- a/Documentation/Books/stash/FileSystems.md +++ /dev/null @@ -1,14 +0,0 @@ - - -Filesystems ------------ - -As one would expect for a database, we recommend a locally mounted filesystems. - -NFS or similar network filesystems will not work. - -On Linux we recommend the use of ext4fs, on Windows NTFS and on macOS HFS+. - -We recommend to **not** use BTRFS on Linux. It is known to not work well in conjunction with ArangoDB. -We experienced that ArangoDB faces latency issues on accessing its database files on BTRFS partitions. -In conjunction with BTRFS and AUFS we also saw data loss on restart. diff --git a/Documentation/Books/stash/Foxx/ErrorHandling.md b/Documentation/Books/stash/Foxx/ErrorHandling.md deleted file mode 100644 index 52e10749f34f..000000000000 --- a/Documentation/Books/stash/Foxx/ErrorHandling.md +++ /dev/null @@ -1,52 +0,0 @@ -Error handling -============== - -Foxx automatically catches errors in your routes and generates machine-readable error responses for them, as well as logging them to the ArangoDB server log. - -If the error is an `ArangoError` thrown by the ArangoDB API (such as when trying to use `collection.document` to access a document that does not exist) or explicitly thrown using the [`res.throw` method](), Foxx will convert the error to a JSON response body with an appropriate HTTP status code. Otherwise Foxx will simply generate a generic JSON error response body with a HTTP 500 status code. - -Catching ArangoDB errors ------------------------- - -The ArangoDB JavaScript API will generally throw instances of the `ArangoError` type - -Better stuff ------------- - -Instead: - -Have application specific error objects with numeric codes to help API users, e.g. 
-```js -class MyFoxxError extends Error { - constructor (code, status = 500) { - super(`My Foxx Error #${code}`); - this.code = code; - this.status = status; - } - toJSON () { - return {error: true, code: this.code, status: this.status}; - } -} -``` - -Look into `require('@arangodb').errors` when checking ArangoError codes to determine what went wrong and throw appropriate custom error. - -Try to handle expected errors in each route and rethrow an application-specific error. - -In root index.js have something like -```js -const UNEXPECTED_ERROR = 1234; -module.context.use((req, res, next) => { - try { - next(); - } catch (e) { - if (e instanceof MyFoxxError) { - res.status(e.status); - res.json(e); - } else { - res.status(500); - res.json(new MyFoxxError(UNEXPECTED_ERROR)); - } - } -}) -``` \ No newline at end of file diff --git a/Documentation/Books/stash/Foxx/Multipart.md b/Documentation/Books/stash/Foxx/Multipart.md deleted file mode 100644 index 61c770eed4a9..000000000000 --- a/Documentation/Books/stash/Foxx/Multipart.md +++ /dev/null @@ -1,2 +0,0 @@ -Multipart -========= \ No newline at end of file diff --git a/Documentation/Books/stash/Foxx/Troubleshooting.md b/Documentation/Books/stash/Foxx/Troubleshooting.md deleted file mode 100644 index 07fc0015e495..000000000000 --- a/Documentation/Books/stash/Foxx/Troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@ -Troubleshooting -=============== - -Check arangod.log for errors - -Make sure to check surrounding stacktraces because they may be related - -When catching ArangoError exceptions use `console.errorStack(err)` to log as internal errors may have been rethrown and that function preserves the full stack trace. - -Common errors/problems observed by community and how to solve? \ No newline at end of file diff --git a/Documentation/Books/stash/Foxx/TypeScript.md b/Documentation/Books/stash/Foxx/TypeScript.md deleted file mode 100644 index d30119aeb2e3..000000000000 --- a/Documentation/Books/stash/Foxx/TypeScript.md +++ /dev/null @@ -1,27 +0,0 @@ -TypeScript -========== - -Have all code in subfolder - -``` - manifest.json - src/ - api/ - index.js - ... - scripts/ - setup.js -``` -Use `tsc` with `tsconfig.json` like - -```json -{ - "compilerOptions": { - // ... - "outDir": "./build/", - "rootDir": "./src/" - } -} -``` - -Add build dir to `.gitignore`, add src dir to `.foxxignore`, use `foxx-cli` to install. diff --git a/Documentation/Books/stash/ServerInternals.md b/Documentation/Books/stash/ServerInternals.md deleted file mode 100644 index 4f975c88a3bf..000000000000 --- a/Documentation/Books/stash/ServerInternals.md +++ /dev/null @@ -1,70 +0,0 @@ -Server-side db-Object implementation ------------------------------------- - -We [already talked about the arangosh db Object implementation](../Programs/Arangosh/README.md), Now a little more about the server version, so the following examples won't work properly in arangosh. - -Server-side methods of the *db object* will return an `[object ShapedJson]`. This datatype is a very lightweight JavaScript object that contains an internal pointer to where the document data are actually stored in memory or on disk. Especially this is not a fullblown copy of the document's complete data. - -When such an object's property is accessed, this will invoke an accessor function. For example, accessing `doc.name` of such an object will call a C++ function behind the scenes to fetch the actual value for the property `name`. When a property is written to this, it will also trigger an accessor function. 
This accessor function will first copy all property values into the object's own properties, and then discard the pointer to the data in memory. From this point on, the accessor functions will not do anything special, so the object will behave like a normal JavaScript object. - -All of this is done for performance reasons. It often allows ommitting the creation of big JavaScript objects that contain lots of data. For example, if all thats needed from a document is a single property, fully constructing the document as a JavaScript object has a high overhead (CPU time for processing, memory, plus later V8 garbage collection). - -Here's an example: -```js -var ShapedJson = require("@arangodb").ShapedJson; -// fetches document from collection into a JavaScript object -var doc = db.test.any(); - -// check if the document object is a shaped object -// returns true for shaped objects, false otherwise -doc instanceof ShapedJson; - -// invokes the property read accessor. returns property value byValue -doc.name; - -// invokes the property write accessor. will copy document data -// into the JavaScript object once -// and store the value in the property as requested -doc.name = "test"; - -// doc will now behave like a regular object -doc.foo = "bar"; -``` - -There is one gotcha though with such objects: the accessor functions are only invoked when accessing top level properties. When accessing nested properties, the accessor will only be called for the top level property, which will then return the requested property's value. Accessing a subproperty of this returned property however does not have a reference to the original object. - -Here's an example: - -```js -// create an object with a nested property -db.test.save({ name: { first: "Jan" } }); -doc; -{ - "_id" : "test/2056013422404", - "_rev" : "2056013422404", - "_key" : "2056013422404", - "name" : { - "first" : "Jan" - } -} - -// now modify the nested property -doc.name.first = "test"; -doc; -{ - "_id" : "test/2056013422404", - "_rev" : "2056013422404", - "_key" : "2056013422404", - "name" : { - "first" : "Jan" /* oops */ - } -} -``` - -So what happened here? The statement `doc.name.first = "test"` calls the read accessor for property `name` first. This produces an object `{"name":"Jan"}` whose property `first` is modifed directly afterwards. But the object `{"name":"Jan"}` is a temporary object and not the same (`===`) object as `doc.name`. This is why updating the nested property effectively failed. - -There is no way to detect this in a read accessor unfortunately. It does not have any idea about what will be done with the returned object. So this case cannot be tracked/trapped in the accessor. - -A workaround for this problem would be to clone the object in the user code if the document is going to be modified. This will make all modification safe. The cloning can also be made conditional for cases when the object is an instance of `ShapedJson` or when nested properties are to be accessed. Cloning is not required when the object is no `ShapedJson` or when only top level properties are accessed. - -Only those documents that are stored in a collections datafile will be returned as `ShapedJson`. The documents, that are still in the write-ahead-log, will always be returned as regular JavaScript objects, as they are not yet shaped and the optimization would not work. 
However, an application cannot safely predict when a document is transferred from the write-ahead log to the collection's datafile, so the same document can be returned in either form. The only safe way is to check whether the object is an instance of `ShapedJson`, as shown above. diff --git a/Documentation/Books/stash/WebInterface.md b/Documentation/Books/stash/WebInterface.md deleted file mode 100644 index b6674da47487..000000000000 --- a/Documentation/Books/stash/WebInterface.md +++ /dev/null @@ -1,16 +0,0 @@ -Accessing the Web Interface -=========================== - -ArangoDB comes with a built-in web interface for administration. The web -interface can be accessed via the URL: - -``` -http://127.0.0.1:8529 -``` - -If everything works as expected, you should see the login view: - -![Login View](../Programs/WebInterface/images/loginView.png) - -For more information on the ArangoDB web interface, see -[Web Interface](../Programs/WebInterface/README.md) diff --git a/Documentation/DocuBlocks/collectionProperties.md b/Documentation/DocuBlocks/collectionProperties.md index 78ed0e54e19c..850ee44efce7 100644 --- a/Documentation/DocuBlocks/collectionProperties.md +++ b/Documentation/DocuBlocks/collectionProperties.md @@ -54,7 +54,7 @@ In a cluster setup, the result will also contain the following attributes: * *replicationFactor*: determines how many copies of each shard are kept on different DBServers. Has to be in the range of 1-10 *(Cluster only)* - * *minReplicationFactor* : determines the number of minimal shard copies kept on +* *minReplicationFactor* : determines the number of minimal shard copies kept on different DBServers, a shard will refuse to write, if less then this amount of copies are in sync. Has to be in the range of 1-replicationFactor *(Cluster only)* diff --git a/Documentation/Examples/.gitkeep b/Documentation/Examples/.gitkeep new file mode 100644 index 000000000000..068f750cdd41 --- /dev/null +++ b/Documentation/Examples/.gitkeep @@ -0,0 +1,5 @@ +Git can not track empty repositories. +This file ensures that the Examples directory keeps existing +even with the generated files being in the new docs repository. +Some of the old documentation building scripts are still used +by the new system which copies the examples into this folder.
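The per-route pattern described in the deleted ErrorHandling notes above (check `require('@arangodb').errors`, then rethrow an application-specific error) could be sketched as follows. This is an illustrative assumption, not code from the repository: the router setup, the `docs` collection, the `DOC_NOT_FOUND` code, and the module path for `MyFoxxError` are all hypothetical.

```js
'use strict';
// Sketch: translate an expected ArangoError into an application-specific
// error inside the route; anything unexpected bubbles up to the root
// middleware shown above, which reports it as UNEXPECTED_ERROR.
const { errors } = require('@arangodb');
const createRouter = require('@arangodb/foxx/router');
const MyFoxxError = require('./my-foxx-error'); // hypothetical module exporting the class above

const router = createRouter();
module.context.use(router);

const docs = module.context.collection('docs'); // hypothetical collection
const DOC_NOT_FOUND = 4040;                     // hypothetical application error code

router.get('/docs/:key', (req, res) => {
  try {
    res.json(docs.document(req.pathParams.key));
  } catch (e) {
    if (e.isArangoError && e.errorNum === errors.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) {
      // expected case: rethrow as an application-specific error
      throw new MyFoxxError(DOC_NOT_FOUND, 404);
    }
    throw e; // unexpected: let the root error handler deal with it
  }
});
```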
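A minimal sketch of the cloning workaround described in the ServerInternals notes above, assuming server-side JavaScript with access to the `db` object and ArangoDB's bundled `lodash` module; the collection and attribute names are taken from the example:

```js
// Clone a ShapedJson document before modifying nested properties, so the
// write lands on a plain JavaScript object instead of a temporary copy.
var ShapedJson = require("@arangodb").ShapedJson;
var _ = require("lodash");

var doc = db.test.any();

// Only ShapedJson instances need cloning; plain objects (e.g. documents
// still in the write-ahead log) can be modified directly.
if (doc instanceof ShapedJson) {
  doc = _.cloneDeep(doc);
}

doc.name.first = "test";       // modifies the clone's own data
db.test.update(doc._key, doc); // persist the change explicitly
```

Unconditional cloning would also work, at the cost of giving up the ShapedJson optimization for documents that are only read.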
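For the `minReplicationFactor` property described in the collectionProperties docublock above, a hypothetical arangosh call in a cluster could look as follows; the collection name and the concrete values are illustrative only:

```js
// Keep 3 copies of each shard; refuse writes once fewer than 2 copies are in sync.
db._create("orders", {
  numberOfShards: 3,
  replicationFactor: 3,
  minReplicationFactor: 2
});
```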
\ No newline at end of file diff --git a/Documentation/Examples/001_collectionAll.generated b/Documentation/Examples/001_collectionAll.generated deleted file mode 100644 index a1a0b6159fbe..000000000000 --- a/Documentation/Examples/001_collectionAll.generated +++ /dev/null @@ -1,63 +0,0 @@ -arangosh> db.five.insert({ name : "one" }); -{ - "_id" : "five/112", - "_key" : "112", - "_rev" : "_YOn09b---_" -} -arangosh> db.five.insert({ name : "two" }); -{ - "_id" : "five/116", - "_key" : "116", - "_rev" : "_YOn09b---B" -} -arangosh> db.five.insert({ name : "three" }); -{ - "_id" : "five/119", - "_key" : "119", - "_rev" : "_YOn09bC--_" -} -arangosh> db.five.insert({ name : "four" }); -{ - "_id" : "five/122", - "_key" : "122", - "_rev" : "_YOn09bC--B" -} -arangosh> db.five.insert({ name : "five" }); -{ - "_id" : "five/125", - "_key" : "125", - "_rev" : "_YOn09bC--D" -} -arangosh> db.five.all().toArray(); -[ - { - "_key" : "119", - "_id" : "five/119", - "_rev" : "_YOn09bC--_", - "name" : "three" - }, - { - "_key" : "112", - "_id" : "five/112", - "_rev" : "_YOn09b---_", - "name" : "one" - }, - { - "_key" : "116", - "_id" : "five/116", - "_rev" : "_YOn09b---B", - "name" : "two" - }, - { - "_key" : "125", - "_id" : "five/125", - "_rev" : "_YOn09bC--D", - "name" : "five" - }, - { - "_key" : "122", - "_id" : "five/122", - "_rev" : "_YOn09bC--B", - "name" : "four" - } -] diff --git a/Documentation/Examples/002_collectionAllNext.generated b/Documentation/Examples/002_collectionAllNext.generated deleted file mode 100644 index 04a582d220b1..000000000000 --- a/Documentation/Examples/002_collectionAllNext.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db.five.insert({ name : "one" }); -{ - "_id" : "five/141", - "_key" : "141", - "_rev" : "_YOn09ce--_" -} -arangosh> db.five.insert({ name : "two" }); -{ - "_id" : "five/145", - "_key" : "145", - "_rev" : "_YOn09ce--B" -} -arangosh> db.five.insert({ name : "three" }); -{ - "_id" : "five/148", - "_key" : "148", - "_rev" : "_YOn09ci--_" -} -arangosh> db.five.insert({ name : "four" }); -{ - "_id" : "five/151", - "_key" : "151", - "_rev" : "_YOn09ci--B" -} -arangosh> db.five.insert({ name : "five" }); -{ - "_id" : "five/154", - "_key" : "154", - "_rev" : "_YOn09ci--D" -} -arangosh> db.five.all().limit(2).toArray(); -[ - { - "_key" : "141", - "_id" : "five/141", - "_rev" : "_YOn09ce--_", - "name" : "one" - }, - { - "_key" : "151", - "_id" : "five/151", - "_rev" : "_YOn09ci--B", - "name" : "four" - } -] diff --git a/Documentation/Examples/003_collectionByExample.generated b/Documentation/Examples/003_collectionByExample.generated deleted file mode 100644 index f351fc9dfbb9..000000000000 --- a/Documentation/Examples/003_collectionByExample.generated +++ /dev/null @@ -1,52 +0,0 @@ -arangosh> db.users.insert({ name: "Gerhard" }); -{ - "_id" : "users/170", - "_key" : "170", - "_rev" : "_YOn09de--_" -} -arangosh> db.users.insert({ name: "Helmut" }); -{ - "_id" : "users/174", - "_key" : "174", - "_rev" : "_YOn09de--B" -} -arangosh> db.users.insert({ name: "Angela" }); -{ - "_id" : "users/177", - "_key" : "177", - "_rev" : "_YOn09de--D" -} -arangosh> db.users.all().toArray(); -[ - { - "_key" : "174", - "_id" : "users/174", - "_rev" : "_YOn09de--B", - "name" : "Helmut" - }, - { - "_key" : "170", - "_id" : "users/170", - "_rev" : "_YOn09de--_", - "name" : "Gerhard" - }, - { - "_key" : "177", - "_id" : "users/177", - "_rev" : "_YOn09de--D", - "name" : "Angela" - } -] -arangosh> db.users.byExample({ "_id" : "users/20" }).toArray(); -[ ] -arangosh> db.users.byExample({ 
"name" : "Gerhard" }).toArray(); -[ - { - "_key" : "170", - "_id" : "users/170", - "_rev" : "_YOn09de--_", - "name" : "Gerhard" - } -] -arangosh> db.users.byExample({ "name" : "Helmut", "_id" : "users/15" }).toArray(); -[ ] diff --git a/Documentation/Examples/004_collectionByExampleNext.generated b/Documentation/Examples/004_collectionByExampleNext.generated deleted file mode 100644 index 2ffae1cde688..000000000000 --- a/Documentation/Examples/004_collectionByExampleNext.generated +++ /dev/null @@ -1,26 +0,0 @@ -arangosh> db.users.insert({ name: "Gerhard" }); -{ - "_id" : "users/196", - "_key" : "196", - "_rev" : "_YOn09ee--B" -} -arangosh> db.users.insert({ name: "Helmut" }); -{ - "_id" : "users/200", - "_key" : "200", - "_rev" : "_YOn09ei--_" -} -arangosh> db.users.insert({ name: "Angela" }); -{ - "_id" : "users/203", - "_key" : "203", - "_rev" : "_YOn09ei--B" -} -arangosh> var a = db.users.byExample( {"name" : "Angela" } ); -arangosh> while (a.hasNext()) print(a.next()); -{ - "_key" : "203", - "_id" : "users/203", - "_rev" : "_YOn09ei--B", - "name" : "Angela" -} diff --git a/Documentation/Examples/005_collectionRange.generated b/Documentation/Examples/005_collectionRange.generated deleted file mode 100644 index 096167d0c36a..000000000000 --- a/Documentation/Examples/005_collectionRange.generated +++ /dev/null @@ -1,46 +0,0 @@ -arangosh> db.old.ensureIndex({ type: "skiplist", fields: [ "age" ] }); -{ - "deduplicate" : true, - "fields" : [ - "age" - ], - "id" : "old/218", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> db.old.insert({ age: 15 }); -{ - "_id" : "old/221", - "_key" : "221", - "_rev" : "_YOn09gi--_" -} -arangosh> db.old.insert({ age: 25 }); -{ - "_id" : "old/225", - "_key" : "225", - "_rev" : "_YOn09gi--B" -} -arangosh> db.old.insert({ age: 30 }); -{ - "_id" : "old/228", - "_key" : "228", - "_rev" : "_YOn09gm--_" -} -arangosh> db.old.range("age", 10, 30).toArray(); -[ - { - "_key" : "221", - "_id" : "old/221", - "_rev" : "_YOn09gi--_", - "age" : 15 - }, - { - "_key" : "225", - "_id" : "old/225", - "_rev" : "_YOn09gi--B", - "age" : 25 - } -] diff --git a/Documentation/Examples/006_collectionClosedRange.generated b/Documentation/Examples/006_collectionClosedRange.generated deleted file mode 100644 index 02dbfb3fdcb2..000000000000 --- a/Documentation/Examples/006_collectionClosedRange.generated +++ /dev/null @@ -1,52 +0,0 @@ -arangosh> db.old.ensureIndex({ type: "skiplist", fields: [ "age" ] }); -{ - "deduplicate" : true, - "fields" : [ - "age" - ], - "id" : "old/245", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> db.old.insert({ age: 15 }); -{ - "_id" : "old/248", - "_key" : "248", - "_rev" : "_YOn09im--_" -} -arangosh> db.old.insert({ age: 25 }); -{ - "_id" : "old/252", - "_key" : "252", - "_rev" : "_YOn09im--B" -} -arangosh> db.old.insert({ age: 30 }); -{ - "_id" : "old/255", - "_key" : "255", - "_rev" : "_YOn09iq--_" -} -arangosh> db.old.closedRange("age", 10, 30).toArray(); -[ - { - "_key" : "248", - "_id" : "old/248", - "_rev" : "_YOn09im--_", - "age" : 15 - }, - { - "_key" : "252", - "_id" : "old/252", - "_rev" : "_YOn09im--B", - "age" : 25 - }, - { - "_key" : "255", - "_id" : "old/255", - "_rev" : "_YOn09iq--_", - "age" : 30 - } -] diff --git a/Documentation/Examples/007_collectionNear.generated b/Documentation/Examples/007_collectionNear.generated deleted file mode 100644 index 72dda5f76cf6..000000000000 --- 
a/Documentation/Examples/007_collectionNear.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db.geo.ensureIndex({ type: "geo", fields: [ "loc" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "loc" - ], - "geoJson" : false, - "id" : "geo/271", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (var i = -90; i <= 90; i += 10) { -........> for (var j = -180; j <= 180; j += 10) { -........> db.geo.save({ -........> name : "Name/" + i + "/" + j, -........> loc: [ i, j ] }); -........> } } -arangosh> db.geo.near(0, 0).limit(2).toArray(); -[ - { - "_key" : "1328", - "_id" : "geo/1328", - "_rev" : "_YOn09o6--D", - "name" : "Name/0/0", - "loc" : [ - 0, - 0 - ] - }, - { - "_key" : "1217", - "_id" : "geo/1217", - "_rev" : "_YOn09nS--_", - "name" : "Name/-10/0", - "loc" : [ - -10, - 0 - ] - } -] diff --git a/Documentation/Examples/008_collectionNearDistance.generated b/Documentation/Examples/008_collectionNearDistance.generated deleted file mode 100644 index 1894b56febe4..000000000000 --- a/Documentation/Examples/008_collectionNearDistance.generated +++ /dev/null @@ -1,47 +0,0 @@ -arangosh> db.geo.ensureIndex({ type: "geo", fields: [ "loc" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "loc" - ], - "geoJson" : false, - "id" : "geo/2399", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (var i = -90; i <= 90; i += 10) { -........> for (var j = -180; j <= 180; j += 10) { -........> db.geo.save({ -........> name : "Name/" + i + "/" + j, -........> loc: [ i, j ] }); -........> } } -arangosh> db.geo.near(0, 0).distance().limit(2).toArray(); -[ - { - "_id" : "geo/3456", - "_key" : "3456", - "_rev" : "_YOn09za--H", - "loc" : [ - 0, - 0 - ], - "name" : "Name/0/0", - "distance" : 0 - }, - { - "_id" : "geo/3345", - "_key" : "3345", - "_rev" : "_YOn09zC--_", - "loc" : [ - -10, - 0 - ], - "name" : "Name/-10/0", - "distance" : 1111949.2664455874 - } -] diff --git a/Documentation/Examples/009_collectionWithin.generated b/Documentation/Examples/009_collectionWithin.generated deleted file mode 100644 index e372467e56fa..000000000000 --- a/Documentation/Examples/009_collectionWithin.generated +++ /dev/null @@ -1,105 +0,0 @@ -arangosh> for (var i = -90; i <= 90; i += 10) { -........> for (var j = -180; j <= 180; j += 10) { -........> db.geo.save({ name : "Name/" + i + "/" + j, loc: [ i, j ] }); } } -arangosh> db.geo.within(0, 0, 2000 * 1000).distance().toArray(); -[ - { - "_id" : "geo/5584", - "_key" : "5584", - "_rev" : "_YOn099m--J", - "loc" : [ - 0, - 0 - ], - "name" : "Name/0/0", - "distance" : 0 - }, - { - "_id" : "geo/5587", - "_key" : "5587", - "_rev" : "_YOn099q--_", - "loc" : [ - 0, - 10 - ], - "name" : "Name/0/10", - "distance" : 1111949.2664455874 - }, - { - "_id" : "geo/5581", - "_key" : "5581", - "_rev" : "_YOn099m--H", - "loc" : [ - 0, - -10 - ], - "name" : "Name/0/-10", - "distance" : 1111949.2664455874 - }, - { - "_id" : "geo/5695", - "_key" : "5695", - "_rev" : "_YOn1--C--B", - "loc" : [ - 10, - 0 - ], - "name" : "Name/10/0", - "distance" : 1111949.2664455874 - }, - { - "_id" : "geo/5473", - "_key" : "5473", - "_rev" : "_YOn099O--B", - "loc" : [ - -10, - 0 - ], - "name" : "Name/-10/0", - "distance" : 1111949.2664455874 - }, - { - "_id" : "geo/5692", - "_key" : "5692", - "_rev" : "_YOn1--C--_", - "loc" : [ - 10, - -10 - ], - "name" : "Name/10/-10", - 
"distance" : 1568520.5567985761 - }, - { - "_id" : "geo/5698", - "_key" : "5698", - "_rev" : "_YOn1--C--D", - "loc" : [ - 10, - 10 - ], - "name" : "Name/10/10", - "distance" : 1568520.5567985761 - }, - { - "_id" : "geo/5470", - "_key" : "5470", - "_rev" : "_YOn099O--_", - "loc" : [ - -10, - -10 - ], - "name" : "Name/-10/-10", - "distance" : 1568520.5567985761 - }, - { - "_id" : "geo/5476", - "_key" : "5476", - "_rev" : "_YOn099O--D", - "loc" : [ - -10, - 10 - ], - "name" : "Name/-10/10", - "distance" : 1568520.5567985761 - } -] diff --git a/Documentation/Examples/010_documentsCollectionRemoveByExample.generated b/Documentation/Examples/010_documentsCollectionRemoveByExample.generated deleted file mode 100644 index fcd928158794..000000000000 --- a/Documentation/Examples/010_documentsCollectionRemoveByExample.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.example.removeByExample( {Hello : "world"} ); -1 diff --git a/Documentation/Examples/011_documentsCollectionReplaceByExample.generated b/Documentation/Examples/011_documentsCollectionReplaceByExample.generated deleted file mode 100644 index e428937ecbb9..000000000000 --- a/Documentation/Examples/011_documentsCollectionReplaceByExample.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> db.example.insert({ Hello : "world" }); -{ - "_id" : "example/6675", - "_key" : "6675", - "_rev" : "_YOn1-Cy--B" -} -arangosh> db.example.replaceByExample({ Hello: "world" }, {Hello: "mars"}, false, 5); -1 diff --git a/Documentation/Examples/012_documentsCollectionUpdateByExample.generated b/Documentation/Examples/012_documentsCollectionUpdateByExample.generated deleted file mode 100644 index 6a6515ed07f2..000000000000 --- a/Documentation/Examples/012_documentsCollectionUpdateByExample.generated +++ /dev/null @@ -1,19 +0,0 @@ -arangosh> db.example.insert({ Hello : "world", foo : "bar" }); -{ - "_id" : "example/6695", - "_key" : "6695", - "_rev" : "_YOn1-D2--_" -} -arangosh> db.example.updateByExample({ Hello: "world" }, { Hello: "foo", World: "bar" }, false); -1 -arangosh> db.example.byExample({ Hello: "foo" }).toArray() -[ - { - "_key" : "6695", - "_id" : "example/6695", - "_rev" : "_YOn1-D6--_", - "Hello" : "foo", - "foo" : "bar", - "World" : "bar" - } -] diff --git a/Documentation/Examples/01_workWithAQL_all.generated b/Documentation/Examples/01_workWithAQL_all.generated deleted file mode 100644 index 8bca7876e4c0..000000000000 --- a/Documentation/Examples/01_workWithAQL_all.generated +++ /dev/null @@ -1,12 +0,0 @@ -arangosh> db._create("mycollection") -[ArangoCollection 6709, "mycollection" (type document, status loaded)] -arangosh> db.mycollection.save({ _key: "testKey", Hello : "World" }) -{ - "_id" : "mycollection/testKey", - "_key" : "testKey", - "_rev" : "_YOn1-E6--_" -} -arangosh> db._query('FOR my IN mycollection RETURN my._key').toArray() -[ - "testKey" -] diff --git a/Documentation/Examples/01_workWithAQL_databaseExplain.generated b/Documentation/Examples/01_workWithAQL_databaseExplain.generated deleted file mode 100644 index 4aacc9e38733..000000000000 --- a/Documentation/Examples/01_workWithAQL_databaseExplain.generated +++ /dev/null @@ -1,24 +0,0 @@ -arangosh> db._explain("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false}); -Query String: - LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1 - -Execution plan: - Id NodeType Est. 
Comment - 1 SingletonNode 1 * ROOT - 4 CalculationNode 1 - LET #2 = 1 /* json expression */ /* const assignment */ - 2 CalculationNode 1 - LET s = SLEEP(0.25) /* simple expression */ - 3 CalculationNode 1 - LET t = SLEEP(0.5) /* simple expression */ - 5 ReturnNode 1 - RETURN #2 - -Indexes used: - none - -Functions used: - Name Deterministic Cacheable Uses V8 - SLEEP false false false - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - - diff --git a/Documentation/Examples/01_workWithAQL_databaseProfileQuery.generated b/Documentation/Examples/01_workWithAQL_databaseProfileQuery.generated deleted file mode 100644 index 03265c740cc1..000000000000 --- a/Documentation/Examples/01_workWithAQL_databaseProfileQuery.generated +++ /dev/null @@ -1,35 +0,0 @@ -arangosh> db._profileQuery("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false}); -Query String: - LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1 - -Execution plan: - Id NodeType Calls Items Runtime [s] Comment - 1 SingletonNode 1 1 0.00000 * ROOT - 4 CalculationNode 1 1 0.00000 - LET #2 = 1 /* json expression */ /* const assignment */ - 2 CalculationNode 1 1 0.27102 - LET s = SLEEP(0.25) /* simple expression */ - 3 CalculationNode 1 1 0.51239 - LET t = SLEEP(0.5) /* simple expression */ - 5 ReturnNode 1 1 0.00001 - RETURN #2 - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - -Query Statistics: - Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s] - 0 0 0 0 0 0.78371 - -Query Profile: - Query Stage Duration [s] - initializing 0.00000 - parsing 0.00002 - optimizing ast 0.00000 - loading collections 0.00000 - instantiating plan 0.00001 - optimizing plan 0.00005 - executing 0.78345 - finalizing 0.00017 - - diff --git a/Documentation/Examples/01_workWithAQL_profileQuerySimple.generated b/Documentation/Examples/01_workWithAQL_profileQuerySimple.generated deleted file mode 100644 index 2358cadefcb4..000000000000 --- a/Documentation/Examples/01_workWithAQL_profileQuerySimple.generated +++ /dev/null @@ -1,40 +0,0 @@ -arangosh> db._profileQuery(` -........> FOR doc IN acollection -........> FILTER doc.value < 10 -........> RETURN doc`, {}, {colors: false} -........> ); -Query String: - FOR doc IN acollection - FILTER doc.value < 10 - RETURN doc - -Execution plan: - Id NodeType Calls Items Runtime [s] Comment - 1 SingletonNode 1 1 0.00000 * ROOT - 2 EnumerateCollectionNode 11 10000 0.00319 - FOR doc IN acollection /* full collection scan */ - 3 CalculationNode 11 10000 0.00291 - LET #1 = (doc.`value` < 10) /* simple expression */ /* collections used: doc : acollection */ - 4 FilterNode 1 10 0.00053 - FILTER #1 - 5 ReturnNode 1 10 0.00000 - RETURN doc - -Indexes used: - none - -Optimization rules applied: - none - -Query Statistics: - Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s] - 0 0 10000 0 9990 0.00686 - -Query Profile: - Query Stage Duration [s] - initializing 0.00000 - parsing 0.00003 - optimizing ast 0.00000 - loading collections 0.00000 - instantiating plan 0.00002 - optimizing plan 0.00010 - executing 0.00664 - finalizing 0.00005 - - diff --git a/Documentation/Examples/02_workWithAQL_aqlCollectionQuery.generated b/Documentation/Examples/02_workWithAQL_aqlCollectionQuery.generated deleted file mode 100644 index 97e3a1fac857..000000000000 --- a/Documentation/Examples/02_workWithAQL_aqlCollectionQuery.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> var key = 'testKey'; -arangosh> db._query(aql`FOR doc IN ${ db.mycollection } 
RETURN doc` -........> ).toArray(); -[ - { - "_key" : "testKey", - "_id" : "mycollection/testKey", - "_rev" : "_YOn1-E6--_", - "Hello" : "World" - } -] diff --git a/Documentation/Examples/02_workWithAQL_aqlQuery.generated b/Documentation/Examples/02_workWithAQL_aqlQuery.generated deleted file mode 100644 index 45f4ba340d95..000000000000 --- a/Documentation/Examples/02_workWithAQL_aqlQuery.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var key = 'testKey'; -arangosh> db._query( -........> aql`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key` -........> ).toArray(); -[ - "testKey" -] diff --git a/Documentation/Examples/02_workWithAQL_bindValues.generated b/Documentation/Examples/02_workWithAQL_bindValues.generated deleted file mode 100644 index e2db2f28f6b4..000000000000 --- a/Documentation/Examples/02_workWithAQL_bindValues.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> db._query( -........> 'FOR c IN @@collection FILTER c._key == @key RETURN c._key', { -........> '@collection': 'mycollection', -........> 'key': 'testKey' -........> }).toArray(); -[ - "testKey" -] diff --git a/Documentation/Examples/02_workWithAQL_memoryLimit.generated b/Documentation/Examples/02_workWithAQL_memoryLimit.generated deleted file mode 100644 index 2ee2cd93e5f4..000000000000 --- a/Documentation/Examples/02_workWithAQL_memoryLimit.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> db._query( -........> 'FOR i IN 1..100000 SORT i RETURN i', {}, { -........> memoryLimit: 100000 -........> }).toArray(); -[ArangoError 32: AQL: query would use more memory than allowed (while executing)] diff --git a/Documentation/Examples/02_workWithAQL_profileQuerySimpleIndex.generated b/Documentation/Examples/02_workWithAQL_profileQuerySimpleIndex.generated deleted file mode 100644 index 8d1f2f27dc9c..000000000000 --- a/Documentation/Examples/02_workWithAQL_profileQuerySimpleIndex.generated +++ /dev/null @@ -1,42 +0,0 @@ -arangosh> db._profileQuery(` -........> FOR doc IN acollection -........> FILTER doc.value < 10 -........> RETURN doc`, {}, {colors: false} -........> ); -Query String: - FOR doc IN acollection - FILTER doc.value < 10 - RETURN doc - -Execution plan: - Id NodeType Calls Items Runtime [s] Comment - 1 SingletonNode 1 1 0.00000 * ROOT - 6 IndexNode 1 10 0.00002 - FOR doc IN acollection /* skiplist index scan */ - 5 ReturnNode 1 10 0.00000 - RETURN doc - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 6 skiplist acollection false false n/a [ `value` ] (doc.`value` < 10) - -Optimization rules applied: - Id RuleName - 1 use-indexes - 2 remove-filter-covered-by-index - 3 remove-unnecessary-calculations-2 - -Query Statistics: - Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s] - 0 0 0 10 0 0.00022 - -Query Profile: - Query Stage Duration [s] - initializing 0.00000 - parsing 0.00003 - optimizing ast 0.00000 - loading collections 0.00000 - instantiating plan 0.00001 - optimizing plan 0.00008 - executing 0.00003 - finalizing 0.00005 - - diff --git a/Documentation/Examples/03_workWithAQL_getExtra.generated b/Documentation/Examples/03_workWithAQL_getExtra.generated deleted file mode 100644 index ae0ed31ecb88..000000000000 --- a/Documentation/Examples/03_workWithAQL_getExtra.generated +++ /dev/null @@ -1,17 +0,0 @@ -arangosh> db._query(`FOR i IN 1..100 -........> INSERT { _key: CONCAT('test', TO_STRING(i)) } -........> INTO mycollection` -........> ).getExtra(); -{ - "stats" : { - "writesExecuted" : 100, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" 
: 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00045371055603027344, - "peakMemoryUsage" : 50576 - }, - "warnings" : [ ] -} diff --git a/Documentation/Examples/03_workWithAQL_profileQuerySubquery.generated b/Documentation/Examples/03_workWithAQL_profileQuerySubquery.generated deleted file mode 100644 index b85dd69b3dad..000000000000 --- a/Documentation/Examples/03_workWithAQL_profileQuerySubquery.generated +++ /dev/null @@ -1,48 +0,0 @@ -arangosh> db._profileQuery(` -........> LET list = (FOR doc in acollection FILTER doc.value > 90 RETURN doc) -........> FOR a IN list -........> FILTER a.value < 91 -........> RETURN a`, {}, {colors: false, optimizer:{rules:["-all"]}} -........> ); -Query String: - LET list = (FOR doc in acollection FILTER doc.value > 90 RETURN doc) - FOR a IN list - FILTER a.value < 91 - RETURN a - -Execution plan: - Id NodeType Calls Items Runtime [s] Comment - 1 SingletonNode 1 1 0.00000 * ROOT - 7 SubqueryNode 1 1 0.01278 - LET list = ... /* const subquery */ - 2 SingletonNode 1 1 0.00000 * ROOT - 3 EnumerateCollectionNode 11 10000 0.00443 - FOR doc IN acollection /* full collection scan */ - 4 CalculationNode 11 10000 0.00400 - LET #5 = (doc.`value` > 90) /* simple expression */ /* collections used: doc : acollection */ - 5 FilterNode 10 9909 0.00270 - FILTER #5 - 6 ReturnNode 10 9909 0.00164 - RETURN doc - 8 EnumerateListNode 10 9909 0.00109 - FOR a IN list /* list iteration */ - 9 CalculationNode 10 9909 0.00237 - LET #7 = (a.`value` < 91) /* simple expression */ - 10 FilterNode 1 0 0.00042 - FILTER #7 - 11 ReturnNode 1 0 0.00000 - RETURN a - -Indexes used: - none - -Optimization rules applied: - none - -Query Statistics: - Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s] - 0 0 10000 0 10000 0.01700 - -Query Profile: - Query Stage Duration [s] - initializing 0.00000 - parsing 0.00005 - optimizing ast 0.00001 - loading collections 0.00001 - instantiating plan 0.00003 - optimizing plan 0.00009 - executing 0.01668 - finalizing 0.00012 - - diff --git a/Documentation/Examples/04_workWithAQL_profileQueryAggregation.generated b/Documentation/Examples/04_workWithAQL_profileQueryAggregation.generated deleted file mode 100644 index e41f09c3f0c0..000000000000 --- a/Documentation/Examples/04_workWithAQL_profileQueryAggregation.generated +++ /dev/null @@ -1,60 +0,0 @@ -arangosh> db._profileQuery(` -........> FOR u IN myusers -........> COLLECT ageGroup = FLOOR(u.age / 10) * 10 -........> AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age), len = LENGTH(u) -........> RETURN { -........> ageGroup, -........> minAge, -........> maxAge, -........> len -........> }`, {}, {colors: false} -........> ); -Query String: - FOR u IN myusers - COLLECT ageGroup = FLOOR(u.age / 10) * 10 - AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age), len = LENGTH(u) - RETURN { - ageGroup, - minAge, - maxAge, - len - } - -Execution plan: - Id NodeType Calls Items Runtime [s] Comment - 1 SingletonNode 1 1 0.00000 * ROOT - 2 EnumerateCollectionNode 1 20 0.00002 - FOR u IN myusers /* full collection scan */ - 3 CalculationNode 1 20 0.00002 - LET #5 = (FLOOR((u.`age` / 10)) * 10) /* simple expression */ /* collections used: u : myusers */ - 4 CalculationNode 1 20 0.00001 - LET #7 = u.`age` /* attribute expression */ /* collections used: u : myusers */ - 6 CollectNode 1 8 0.00002 - COLLECT ageGroup = #5 AGGREGATE minAge = MIN(#7), maxAge = MAX(#7), len = LENGTH(u) /* hash */ - 9 SortNode 1 8 0.00001 - SORT ageGroup ASC /* sorting strategy: standard */ - 7 CalculationNode 1 
8 0.00001 - LET #11 = { "ageGroup" : ageGroup, "minAge" : minAge, "maxAge" : maxAge, "len" : len } /* simple expression */ - 8 ReturnNode 1 8 0.00000 - RETURN #11 - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 remove-redundant-calculations - 3 remove-unnecessary-calculations - 4 move-calculations-up-2 - 5 move-calculations-down - -Query Statistics: - Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s] - 0 0 20 0 0 0.00051 - -Query Profile: - Query Stage Duration [s] - initializing 0.00000 - parsing 0.00006 - optimizing ast 0.00001 - loading collections 0.00000 - instantiating plan 0.00003 - optimizing plan 0.00021 - executing 0.00010 - finalizing 0.00009 - - diff --git a/Documentation/Examples/04_workWithAQL_statements1.generated b/Documentation/Examples/04_workWithAQL_statements1.generated deleted file mode 100644 index 44df07e39870..000000000000 --- a/Documentation/Examples/04_workWithAQL_statements1.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> stmt = db._createStatement( { -........> "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } ); -[object ArangoStatement] diff --git a/Documentation/Examples/05_workWithAQL_statements10.generated b/Documentation/Examples/05_workWithAQL_statements10.generated deleted file mode 100644 index 44648cdef8ca..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements10.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> var c = stmt.execute(); -arangosh> c.count(); -4 diff --git a/Documentation/Examples/05_workWithAQL_statements2.generated b/Documentation/Examples/05_workWithAQL_statements2.generated deleted file mode 100644 index 5df43cbd0f68..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements2.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> c = stmt.execute(); -[ - 2, - 4 -] -[object ArangoQueryCursor, count: 2, cached: false, hasMore: false] diff --git a/Documentation/Examples/05_workWithAQL_statements3.generated b/Documentation/Examples/05_workWithAQL_statements3.generated deleted file mode 100644 index b379deeb3073..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements3.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> c.toArray(); -[ - 2, - 4 -] diff --git a/Documentation/Examples/05_workWithAQL_statements4.generated b/Documentation/Examples/05_workWithAQL_statements4.generated deleted file mode 100644 index 4187459eb799..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements4.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> while (c.hasNext()) { require("@arangodb").print(c.next()); } -2 -4 diff --git a/Documentation/Examples/05_workWithAQL_statements5.generated b/Documentation/Examples/05_workWithAQL_statements5.generated deleted file mode 100644 index acbb85539ea2..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements5.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var stmt = db._createStatement( { -........> "query": "FOR i IN [ @one, @two ] RETURN i * 2" } ); -arangosh> stmt.bind("one", 1); -arangosh> stmt.bind("two", 2); -arangosh> c = stmt.execute(); -[ - 2, - 4 -] -[object ArangoQueryCursor, count: 2, cached: false, hasMore: false] diff --git a/Documentation/Examples/05_workWithAQL_statements6.generated b/Documentation/Examples/05_workWithAQL_statements6.generated deleted file mode 100644 index b379deeb3073..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements6.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> c.toArray(); -[ - 2, - 4 -] diff --git 
a/Documentation/Examples/05_workWithAQL_statements7.generated b/Documentation/Examples/05_workWithAQL_statements7.generated deleted file mode 100644 index 4187459eb799..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements7.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> while (c.hasNext()) { require("@arangodb").print(c.next()); } -2 -4 diff --git a/Documentation/Examples/05_workWithAQL_statements8.generated b/Documentation/Examples/05_workWithAQL_statements8.generated deleted file mode 100644 index bf83895d3658..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements8.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> stmt = db._createStatement( { -........> "query": "FOR i IN [ @one, @two ] RETURN i * 2", -........> "bindVars": { -........> "one": 1, -........> "two": 2 -........> } -........> } ); -[object ArangoStatement] diff --git a/Documentation/Examples/05_workWithAQL_statements9.generated b/Documentation/Examples/05_workWithAQL_statements9.generated deleted file mode 100644 index 0a3fe67a749f..000000000000 --- a/Documentation/Examples/05_workWithAQL_statements9.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> stmt = db._createStatement( { -........> "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", -........> "count": true } ); -[object ArangoStatement] diff --git a/Documentation/Examples/06_workWithAQL_statements11.generated b/Documentation/Examples/06_workWithAQL_statements11.generated deleted file mode 100644 index 64cec328c4fe..000000000000 --- a/Documentation/Examples/06_workWithAQL_statements11.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> stmt = db._createStatement( { -........> "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", -........> options: {"profile": true}} ); -[object ArangoStatement] diff --git a/Documentation/Examples/06_workWithAQL_statements12.generated b/Documentation/Examples/06_workWithAQL_statements12.generated deleted file mode 100644 index 6e32723685d9..000000000000 --- a/Documentation/Examples/06_workWithAQL_statements12.generated +++ /dev/null @@ -1,25 +0,0 @@ -arangosh> var c = stmt.execute(); -arangosh> c.getExtra(); -{ - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.000095367431640625, - "peakMemoryUsage" : 34168 - }, - "warnings" : [ ], - "profile" : { - "initializing" : 0.0000011920928955078125, - "parsing" : 0.00001239776611328125, - "optimizing ast" : 0.000001430511474609375, - "loading collections" : 0.000001430511474609375, - "instantiating plan" : 0.000007152557373046875, - "optimizing plan" : 0.00003409385681152344, - "executing" : 0.000021457672119140625, - "finalizing" : 0.000011444091796875 - } -} diff --git a/Documentation/Examples/06_workWithAQL_statements13.generated b/Documentation/Examples/06_workWithAQL_statements13.generated deleted file mode 100644 index 5cfdee346d30..000000000000 --- a/Documentation/Examples/06_workWithAQL_statements13.generated +++ /dev/null @@ -1,50 +0,0 @@ -arangosh> db._parse( "FOR i IN [ 1, 2 ] RETURN i" ); -{ - "code" : 200, - "parsed" : true, - "collections" : [ ], - "bindVars" : [ ], - "ast" : [ - { - "type" : "root", - "subNodes" : [ - { - "type" : "for", - "subNodes" : [ - { - "type" : "variable", - "name" : "i", - "id" : 0 - }, - { - "type" : "array", - "subNodes" : [ - { - "type" : "value", - "value" : 1 - }, - { - "type" : "value", - "value" : 2 - } - ] - }, - { - "type" : "no-op" - } - ] - }, - { - "type" : "return", - "subNodes" : [ - { - "type" : 
"reference", - "name" : "i", - "id" : 0 - } - ] - } - ] - } - ] -} diff --git a/Documentation/Examples/06_workWithAQL_statementsExtra.generated b/Documentation/Examples/06_workWithAQL_statementsExtra.generated deleted file mode 100644 index 7d24b2c19f54..000000000000 --- a/Documentation/Examples/06_workWithAQL_statementsExtra.generated +++ /dev/null @@ -1,43 +0,0 @@ -arangosh> db._query(` -........> FOR i IN 1..@count INSERT -........> { _key: CONCAT('anothertest', TO_STRING(i)) } -........> INTO mycollection`, -........> {count: 100}, -........> {}, -........> {fullCount: true} -........> ).getExtra(); -{ - "stats" : { - "writesExecuted" : 100, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "fullCount" : 0, - "executionTime" : 0.0005605220794677734, - "peakMemoryUsage" : 50632 - }, - "warnings" : [ ] -} -arangosh> db._query({ -........> "query": `FOR i IN 200..@count INSERT -........> { _key: CONCAT('anothertest', TO_STRING(i)) } -........> INTO mycollection`, -........> "bindVars": {count: 300}, -........> "options": { fullCount: true} -........> }).getExtra(); -{ - "stats" : { - "writesExecuted" : 101, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "fullCount" : 0, - "executionTime" : 0.0005362033843994141, - "peakMemoryUsage" : 50632 - }, - "warnings" : [ ] -} diff --git a/Documentation/Examples/07_workWithAQL_statementsExplain.generated b/Documentation/Examples/07_workWithAQL_statementsExplain.generated deleted file mode 100644 index bdd6943440a8..000000000000 --- a/Documentation/Examples/07_workWithAQL_statementsExplain.generated +++ /dev/null @@ -1,73 +0,0 @@ -arangosh> var stmt = db._createStatement( -........> "FOR user IN _users RETURN user"); -arangosh> stmt.explain(); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "EnumerateCollectionNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 3, - "estimatedNrItems" : 1, - "random" : false, - "outVariable" : { - "id" : 0, - "name" : "user" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "_users", - "satellite" : false - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 4, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 0, - "name" : "user" - }, - "count" : true - } - ], - "rules" : [ ], - "collections" : [ - { - "name" : "_users", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 0, - "name" : "user" - } - ], - "estimatedCost" : 4, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/08_workWithAQL_statementsPlans.generated b/Documentation/Examples/08_workWithAQL_statementsPlans.generated deleted file mode 100644 index 823c915b3d0e..000000000000 --- a/Documentation/Examples/08_workWithAQL_statementsPlans.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> var formatPlan = function (plan) { -........> return { estimatedCost: plan.estimatedCost, -........> nodes: plan.nodes.map(function(node) { -........> return node.type; }) }; }; -arangosh> formatPlan(stmt.explain().plan); -{ - "estimatedCost" : 4, - "nodes" : [ - "SingletonNode", - "EnumerateCollectionNode", - "ReturnNode" - ] -} 
diff --git a/Documentation/Examples/09_workWithAQL_statementsPlansBind.generated b/Documentation/Examples/09_workWithAQL_statementsPlansBind.generated deleted file mode 100644 index 9d4cb6dbdaaf..000000000000 --- a/Documentation/Examples/09_workWithAQL_statementsPlansBind.generated +++ /dev/null @@ -1,142 +0,0 @@ -arangosh> var stmt = db._createStatement( -........> `FOR doc IN @@collection FILTER doc.user == @user RETURN doc` -........> ); -arangosh> stmt.bind({ "@collection" : "_users", "user" : "root" }); -arangosh> stmt.explain(); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 6, - "estimatedCost" : 1.95, - "estimatedNrItems" : 1, - "outVariable" : { - "id" : 0, - "name" : "doc" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "_users", - "satellite" : false, - "needsGatherNodeSort" : false, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "11", - "type" : "hash", - "fields" : [ - "user" - ], - "selectivityEstimate" : 1, - "unique" : true, - "sparse" : true, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "user", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "doc", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : "root", - "vType" : "string", - "vTypeID" : 4 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 6 - ], - "id" : 5, - "estimatedCost" : 2.95, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 0, - "name" : "doc" - }, - "count" : true - } - ], - "rules" : [ - "use-indexes", - "remove-filter-covered-by-index", - "remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "_users", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "doc" - } - ], - "estimatedCost" : 2.95, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/10_workWithAQL_debugging1.generated b/Documentation/Examples/10_workWithAQL_debugging1.generated deleted file mode 100644 index 55c20f608b03..000000000000 --- a/Documentation/Examples/10_workWithAQL_debugging1.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc"; -arangosh> require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query); diff --git a/Documentation/Examples/10_workWithAQL_debugging2.generated b/Documentation/Examples/10_workWithAQL_debugging2.generated deleted file mode 100644 index f02d58bb2ff6..000000000000 --- a/Documentation/Examples/10_workWithAQL_debugging2.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc"; -arangosh> var bind = { value: 42, "@collection": "mycollection" }; -arangosh> 
require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind); diff --git a/Documentation/Examples/10_workWithAQL_debugging3.generated b/Documentation/Examples/10_workWithAQL_debugging3.generated deleted file mode 100644 index 41ae98f07a79..000000000000 --- a/Documentation/Examples/10_workWithAQL_debugging3.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc"; -arangosh> var bind = { value: 42, "@collection": "mycollection" }; -arangosh> var options = { examples: 10, anonymize: true }; -arangosh> require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind, options); diff --git a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer0.generated b/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer0.generated deleted file mode 100644 index 4602b2f970c7..000000000000 --- a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer0.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var stmt = db._createStatement( -........> "FOR user IN _users FILTER user.user == 'root' RETURN user"); -arangosh> stmt.explain({ allPlans: true }).plans.length; -1 diff --git a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer1.generated b/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer1.generated deleted file mode 100644 index bcb9529cd2ea..000000000000 --- a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer1.generated +++ /dev/null @@ -1,12 +0,0 @@ -arangosh> stmt.explain({ allPlans: true }).plans.map( -........> function(plan) { return formatPlan(plan); }); -[ - { - "estimatedCost" : 2.95, - "nodes" : [ - "SingletonNode", - "IndexNode", - "ReturnNode" - ] - } -] diff --git a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer2.generated b/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer2.generated deleted file mode 100644 index ee09731beac5..000000000000 --- a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer2.generated +++ /dev/null @@ -1,131 +0,0 @@ -arangosh> stmt.explain({ optimizer: { -........> rules: [ "-all", "+remove-redundant-calculations" ] } }); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "EnumerateCollectionNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 3, - "estimatedNrItems" : 1, - "random" : false, - "outVariable" : { - "id" : 0, - "name" : "user" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "_users", - "satellite" : false - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 4, - "estimatedNrItems" : 1, - "expression" : { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "user", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "user", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : "root", - "vType" : "string", - "vTypeID" : 4 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "1" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "FilterNode", - "dependencies" : [ - 3 - ], - "id" : 4, - "estimatedCost" : 5, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 2, - "name" : "1" - } - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 4 - ], - "id" : 5, - 
"estimatedCost" : 6, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 0, - "name" : "user" - }, - "count" : true - } - ], - "rules" : [ ], - "collections" : [ - { - "name" : "_users", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "user" - } - ], - "estimatedCost" : 6, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 3, - "rulesSkipped" : 32, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer3.generated b/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer3.generated deleted file mode 100644 index 8e72339e7cc3..000000000000 --- a/Documentation/Examples/10_workWithAQL_statementsPlansOptimizer3.generated +++ /dev/null @@ -1,20 +0,0 @@ -arangosh> var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc"; -arangosh> require("@arangodb/aql/explainer").explain(query, {colors:false}); -Query String: - FOR doc IN mycollection FILTER doc.value > 42 RETURN doc - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 2 EnumerateCollectionNode 302 - FOR doc IN mycollection /* full collection scan */ - 3 CalculationNode 302 - LET #1 = (doc.`value` > 42) /* simple expression */ /* collections used: doc : mycollection */ - 4 FilterNode 302 - FILTER #1 - 5 ReturnNode 302 - RETURN doc - -Indexes used: - none - -Optimization rules applied: - none - - diff --git a/Documentation/Examples/11_workWithAQL_parseQueries.generated b/Documentation/Examples/11_workWithAQL_parseQueries.generated deleted file mode 100644 index 442166c1070d..000000000000 --- a/Documentation/Examples/11_workWithAQL_parseQueries.generated +++ /dev/null @@ -1,70 +0,0 @@ -arangosh> var stmt = db._createStatement( -........> "FOR doc IN @@collection FILTER doc.foo == @bar RETURN doc"); -arangosh> stmt.parse(); -{ - "bindVars" : [ - "bar", - "@collection" - ], - "collections" : [ ], - "ast" : [ - { - "type" : "root", - "subNodes" : [ - { - "type" : "for", - "subNodes" : [ - { - "type" : "variable", - "name" : "doc", - "id" : 0 - }, - { - "type" : "datasource parameter", - "name" : "@collection" - }, - { - "type" : "no-op" - } - ] - }, - { - "type" : "filter", - "subNodes" : [ - { - "type" : "compare ==", - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "name" : "foo", - "subNodes" : [ - { - "type" : "reference", - "name" : "doc", - "id" : 0 - } - ] - }, - { - "type" : "parameter", - "name" : "bar" - } - ] - } - ] - }, - { - "type" : "return", - "subNodes" : [ - { - "type" : "reference", - "name" : "doc", - "id" : 0 - } - ] - } - ] - } - ] -} diff --git a/Documentation/Examples/AQLEXP_01_axplainer.generated b/Documentation/Examples/AQLEXP_01_axplainer.generated deleted file mode 100644 index 3a0389d48911..000000000000 --- a/Documentation/Examples/AQLEXP_01_axplainer.generated +++ /dev/null @@ -1,46 +0,0 @@ -arangosh> db._create("test"); -[ArangoCollection 97227, "test" (type document, status loaded)] -arangosh> for (i = 0; i < 100; ++i) { db.test.save({ value: i }); } -arangosh> db.test.ensureIndex({ type: "skiplist", fields: [ "value" ] }); -{ - "deduplicate" : true, - "fields" : [ - "value" - ], - "id" : "test/97535", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> var explain = require("@arangodb/aql/explainer").explain; -arangosh> explain("FOR i IN test FILTER i.value > 
97 SORT i.value RETURN i.value", {colors:false}); -Query String: - FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 9 IndexNode 50 - FOR i IN test /* skiplist index scan */ - 5 CalculationNode 50 - LET #3 = i.`value` /* attribute expression */ /* collections used: i : test */ - 8 ReturnNode 50 - RETURN #3 - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 9 skiplist test false false n/a [ `value` ] (i.`value` > 97) - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 move-filters-up - 3 remove-redundant-calculations - 4 remove-unnecessary-calculations - 5 move-calculations-up-2 - 6 move-filters-up-2 - 7 use-indexes - 8 remove-filter-covered-by-index - 9 use-index-for-sort - 10 remove-unnecessary-calculations-2 - - diff --git a/Documentation/Examples/AQLEXP_01_explainCreate.generated b/Documentation/Examples/AQLEXP_01_explainCreate.generated deleted file mode 100644 index dca73276c278..000000000000 --- a/Documentation/Examples/AQLEXP_01_explainCreate.generated +++ /dev/null @@ -1,181 +0,0 @@ -arangosh> stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value"); -[object ArangoStatement] -arangosh> stmt.explain(); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 9, - "estimatedCost" : 7.643856189774724, - "estimatedNrItems" : 50, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false, - "needsGatherNodeSort" : true, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "97535", - "type" : "skiplist", - "fields" : [ - "value" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 9 - ], - "id" : 5, - "estimatedCost" : 57.64385618977472, - "estimatedNrItems" : 50, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 5 - ], - "id" : 8, - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-filters-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "move-filters-up-2", - "use-indexes", - "remove-filter-covered-by-index", - "use-index-for-sort", - 
"remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/AQLEXP_02_explainOverview.generated b/Documentation/Examples/AQLEXP_02_explainOverview.generated deleted file mode 100644 index 6ff46aa7137a..000000000000 --- a/Documentation/Examples/AQLEXP_02_explainOverview.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> stmt.explain().plan.nodes.map(function (node) { return node.type; }); -[ - "SingletonNode", - "IndexNode", - "CalculationNode", - "ReturnNode" -] diff --git a/Documentation/Examples/AQLEXP_03_explainRules.generated b/Documentation/Examples/AQLEXP_03_explainRules.generated deleted file mode 100644 index 1a00e75319e6..000000000000 --- a/Documentation/Examples/AQLEXP_03_explainRules.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> stmt.explain().plan.rules; -[ - "move-calculations-up", - "move-filters-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "move-filters-up-2", - "use-indexes", - "remove-filter-covered-by-index", - "use-index-for-sort", - "remove-unnecessary-calculations-2" -] diff --git a/Documentation/Examples/AQLEXP_04_explainCollections.generated b/Documentation/Examples/AQLEXP_04_explainCollections.generated deleted file mode 100644 index 2de23cbc36d0..000000000000 --- a/Documentation/Examples/AQLEXP_04_explainCollections.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> stmt.explain().plan.collections -[ - { - "name" : "test", - "type" : "read" - } -] diff --git a/Documentation/Examples/AQLEXP_05_explainAllPlans.generated b/Documentation/Examples/AQLEXP_05_explainAllPlans.generated deleted file mode 100644 index 30673f986dfa..000000000000 --- a/Documentation/Examples/AQLEXP_05_explainAllPlans.generated +++ /dev/null @@ -1,180 +0,0 @@ -arangosh> stmt.explain({ allPlans: true }); -{ - "plans" : [ - { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 9, - "estimatedCost" : 7.643856189774724, - "estimatedNrItems" : 50, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false, - "needsGatherNodeSort" : true, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "97535", - "type" : "skiplist", - "fields" : [ - "value" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : 
false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 9 - ], - "id" : 5, - "estimatedCost" : 57.64385618977472, - "estimatedNrItems" : 50, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 5 - ], - "id" : 8, - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-filters-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "move-filters-up-2", - "use-indexes", - "remove-filter-covered-by-index", - "use-index-for-sort", - "remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "initialize" : true, - "isModificationQuery" : false - } - ], - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - } -} diff --git a/Documentation/Examples/AQLEXP_06_explainUnoptimizedPlans.generated b/Documentation/Examples/AQLEXP_06_explainUnoptimizedPlans.generated deleted file mode 100644 index 4a8bfef83fce..000000000000 --- a/Documentation/Examples/AQLEXP_06_explainUnoptimizedPlans.generated +++ /dev/null @@ -1,214 +0,0 @@ -arangosh> stmt.explain({ optimizer: { rules: [ "-all" ] } }); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "EnumerateCollectionNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 102, - "estimatedNrItems" : 100, - "random" : false, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 202, - "estimatedNrItems" : 100, - "expression" : { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "1" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "FilterNode", - "dependencies" : [ - 3 - ], - "id" : 4, - "estimatedCost" : 302, - "estimatedNrItems" : 100, - "inVariable" : { - "id" : 2, - "name" : "1" - } - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 4 - ], - "id" : 5, - "estimatedCost" : 402, - "estimatedNrItems" : 100, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - 
"expressionType" : "attribute" - }, - { - "type" : "SortNode", - "dependencies" : [ - 5 - ], - "id" : 6, - "estimatedCost" : 1066.3856189774724, - "estimatedNrItems" : 100, - "elements" : [ - { - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "ascending" : true - } - ], - "stable" : false, - "limit" : 0, - "strategy" : "standard" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 6 - ], - "id" : 7, - "estimatedCost" : 1166.3856189774724, - "estimatedNrItems" : 100, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 6, - "name" : "5" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 7 - ], - "id" : 8, - "estimatedCost" : 1266.3856189774724, - "estimatedNrItems" : 100, - "inVariable" : { - "id" : 6, - "name" : "5" - }, - "count" : true - } - ], - "rules" : [ ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 1266.3856189774724, - "estimatedNrItems" : 100, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 2, - "rulesSkipped" : 33, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/AQLEXP_07_explainSingleRulePlans.generated b/Documentation/Examples/AQLEXP_07_explainSingleRulePlans.generated deleted file mode 100644 index 63765e92c446..000000000000 --- a/Documentation/Examples/AQLEXP_07_explainSingleRulePlans.generated +++ /dev/null @@ -1,214 +0,0 @@ -arangosh> stmt.explain({ optimizer: { rules: [ "-all", "+use-index-range" ] } }); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "EnumerateCollectionNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 102, - "estimatedNrItems" : 100, - "random" : false, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 202, - "estimatedNrItems" : 100, - "expression" : { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "1" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "FilterNode", - "dependencies" : [ - 3 - ], - "id" : 4, - "estimatedCost" : 302, - "estimatedNrItems" : 100, - "inVariable" : { - "id" : 2, - "name" : "1" - } - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 4 - ], - "id" : 5, - "estimatedCost" : 402, - "estimatedNrItems" : 100, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : 
false, - "expressionType" : "attribute" - }, - { - "type" : "SortNode", - "dependencies" : [ - 5 - ], - "id" : 6, - "estimatedCost" : 1066.3856189774724, - "estimatedNrItems" : 100, - "elements" : [ - { - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "ascending" : true - } - ], - "stable" : false, - "limit" : 0, - "strategy" : "standard" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 6 - ], - "id" : 7, - "estimatedCost" : 1166.3856189774724, - "estimatedNrItems" : 100, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 6, - "name" : "5" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 7 - ], - "id" : 8, - "estimatedCost" : 1266.3856189774724, - "estimatedNrItems" : 100, - "inVariable" : { - "id" : 6, - "name" : "5" - }, - "count" : true - } - ], - "rules" : [ ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 1266.3856189774724, - "estimatedNrItems" : 100, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 2, - "rulesSkipped" : 33, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/AQLEXP_08_explainDisableSingleRulePlans.generated b/Documentation/Examples/AQLEXP_08_explainDisableSingleRulePlans.generated deleted file mode 100644 index c9e849df8d15..000000000000 --- a/Documentation/Examples/AQLEXP_08_explainDisableSingleRulePlans.generated +++ /dev/null @@ -1,199 +0,0 @@ -arangosh> stmt.explain({ optimizer: { rules: [ "-use-index-range", "-use-index-for-sort" ] } }); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 9, - "estimatedCost" : 7.643856189774724, - "estimatedNrItems" : 50, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false, - "needsGatherNodeSort" : false, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "97535", - "type" : "skiplist", - "fields" : [ - "value" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 9 - ], - "id" : 5, - "estimatedCost" : 57.64385618977472, - "estimatedNrItems" : 50, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 
45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "SortNode", - "dependencies" : [ - 5 - ], - "id" : 6, - "estimatedCost" : 339.83666567851094, - "estimatedNrItems" : 50, - "elements" : [ - { - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "ascending" : true - } - ], - "stable" : false, - "limit" : 0, - "strategy" : "standard" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 6 - ], - "id" : 8, - "estimatedCost" : 389.83666567851094, - "estimatedNrItems" : 50, - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-filters-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "move-filters-up-2", - "use-indexes", - "remove-filter-covered-by-index", - "remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 389.83666567851094, - "estimatedNrItems" : 50, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 34, - "rulesSkipped" : 1, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/AQLEXP_09_explainMaxNumberOfPlans.generated b/Documentation/Examples/AQLEXP_09_explainMaxNumberOfPlans.generated deleted file mode 100644 index 7d4023e0366f..000000000000 --- a/Documentation/Examples/AQLEXP_09_explainMaxNumberOfPlans.generated +++ /dev/null @@ -1,179 +0,0 @@ -arangosh> stmt.explain({ maxNumberOfPlans: 1 }); -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 9, - "estimatedCost" : 7.643856189774724, - "estimatedNrItems" : 50, - "outVariable" : { - "id" : 0, - "name" : "i" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "test", - "satellite" : false, - "needsGatherNodeSort" : true, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "97535", - "type" : "skiplist", - "fields" : [ - "value" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare >", - "typeID" : 29, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 97, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 9 - ], - "id" : 5, - "estimatedCost" : 57.64385618977472, - "estimatedNrItems" : 50, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "value", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "i", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : 
"attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 5 - ], - "id" : 8, - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-filters-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "move-filters-up-2", - "use-indexes", - "remove-filter-covered-by-index", - "use-index-for-sort", - "remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "test", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 107.64385618977472, - "estimatedNrItems" : 50, - "initialize" : true, - "isModificationQuery" : false - }, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 34, - "rulesSkipped" : 1, - "plansCreated" : 1 - }, - "cacheable" : true -} diff --git a/Documentation/Examples/AQLEXP_10_explainWarn.generated b/Documentation/Examples/AQLEXP_10_explainWarn.generated deleted file mode 100644 index e74212642408..000000000000 --- a/Documentation/Examples/AQLEXP_10_explainWarn.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var stmt = db._createStatement("FOR i IN 1..10 RETURN 1 / 0") -arangosh> stmt.explain().warnings; -[ - { - "code" : 1562, - "message" : "division by zero" - } -] diff --git a/Documentation/Examples/AQLEXP_11_explainjs.generated b/Documentation/Examples/AQLEXP_11_explainjs.generated deleted file mode 100644 index c6af862777e7..000000000000 --- a/Documentation/Examples/AQLEXP_11_explainjs.generated +++ /dev/null @@ -1,50 +0,0 @@ -arangosh> db._explain('FOR x IN 1..10 LET then=DATE_NOW() FOR y IN 1..10 LET now=DATE_NOW() LET nowstr=CONCAT(now, x, y, then) RETURN nowstr', {}, {colors: false}) -Query string: - FOR x IN 1..10 LET then=DATE_NOW() FOR y IN 1..10 LET now=DATE_NOW() LET nowstr=CONCAT(now, x, y, - then) RETURN nowstr - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 2 CalculationNode 1 - LET #5 = 1 .. 10 /* range */ /* simple expression */ - 3 EnumerateListNode 10 - FOR x IN #5 /* list iteration */ - 4 CalculationNode 10 - LET then = DATE_NOW() /* simple expression */ - 6 EnumerateListNode 100 - FOR y IN #5 /* list iteration */ - 7 CalculationNode 100 - LET now = DATE_NOW() /* simple expression */ - 8 CalculationNode 100 - LET nowstr = CONCAT(now, x, y, then) /* simple expression */ - 9 ReturnNode 100 - RETURN nowstr - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 remove-redundant-calculations - 3 remove-unnecessary-calculations - - -arangosh> db._explain('LET now=DATE_NOW() FOR x IN 1..10 FOR y IN 1..10 LET nowstr=CONCAT(now, x, y, now) RETURN nowstr', {}, {colors: false}) -Query string: - LET now=DATE_NOW() FOR x IN 1..10 FOR y IN 1..10 LET nowstr=CONCAT(now, x, y, now) RETURN nowstr - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 3 CalculationNode 1 - LET #4 = 1 .. 
10 /* range */ /* simple expression */ - 2 CalculationNode 1 - LET now = DATE_NOW() /* simple expression */ - 4 EnumerateListNode 10 - FOR x IN #4 /* list iteration */ - 6 EnumerateListNode 100 - FOR y IN #4 /* list iteration */ - 7 CalculationNode 100 - LET nowstr = CONCAT(now, x, y, now) /* simple expression */ - 8 ReturnNode 100 - RETURN nowstr - -Indexes used: - none - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 remove-redundant-calculations - 3 remove-unnecessary-calculations - - diff --git a/Documentation/Examples/COMBINING_GRAPH_01_create_graph.generated b/Documentation/Examples/COMBINING_GRAPH_01_create_graph.generated deleted file mode 100644 index 6472fb4682dd..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_01_create_graph.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); diff --git a/Documentation/Examples/COMBINING_GRAPH_02_show_geo.generated b/Documentation/Examples/COMBINING_GRAPH_02_show_geo.generated deleted file mode 100644 index 643f56490ce7..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_02_show_geo.generated +++ /dev/null @@ -1,17 +0,0 @@ -@Q: - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - RETURN startCity._key -@B -{ - "bonn": [ - 7.0998, - 50.734 - ], - "radius": 400000 -} -@R -[ - "Cologne", - "Hamburg" -] \ No newline at end of file diff --git a/Documentation/Examples/COMBINING_GRAPH_03_explain_geo.generated b/Documentation/Examples/COMBINING_GRAPH_03_explain_geo.generated deleted file mode 100644 index 208cfbdfbafe..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_03_explain_geo.generated +++ /dev/null @@ -1,39 +0,0 @@ -@Q: - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - RETURN startCity._key -@B -{ - "bonn": [ - 7.0998, - 50.734 - ], - "radius": 400000 -} -@R -Query String: - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - RETURN startCity._key - - -Execution plan: - Id NodeType Est. 
Comment - 1 SingletonNode 1 * ROOT - 7 IndexNode 3 - FOR startCity IN germanCity /* geo index scan */ - 5 CalculationNode 3 - LET #3 = startCity.`_key` /* attribute expression */ /* collections used: startCity : germanCity */ - 6 ReturnNode 3 - RETURN #3 - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 7 geo germanCity false true n/a [ `geometry` ] (GEO_DISTANCE([ 7.0998, 50.734 ], startCity.`geometry`) < 400000) - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 move-filters-up - 3 move-calculations-up-2 - 4 move-filters-up-2 - 5 geo-index-optimizer - 6 remove-unnecessary-calculations-2 - diff --git a/Documentation/Examples/COMBINING_GRAPH_04_combine.generated b/Documentation/Examples/COMBINING_GRAPH_04_combine.generated deleted file mode 100644 index a56b20c8c931..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_04_combine.generated +++ /dev/null @@ -1,37 +0,0 @@ -@Q: - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - FOR v, e, p IN 1..1 OUTBOUND startCity - GRAPH 'routeplanner' - RETURN {startcity: startCity._key, traversedCity: v._key} -@B -{ - "bonn": [ - 7.0998, - 50.734 - ], - "radius": 400000 -} -@R -[ - { - "startcity": "Cologne", - "traversedCity": "Lyon" - }, - { - "startcity": "Cologne", - "traversedCity": "Paris" - }, - { - "startcity": "Hamburg", - "traversedCity": "Cologne" - }, - { - "startcity": "Hamburg", - "traversedCity": "Paris" - }, - { - "startcity": "Hamburg", - "traversedCity": "Lyon" - } -] \ No newline at end of file diff --git a/Documentation/Examples/COMBINING_GRAPH_05_combine_let.generated b/Documentation/Examples/COMBINING_GRAPH_05_combine_let.generated deleted file mode 100644 index be8872467527..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_05_combine_let.generated +++ /dev/null @@ -1,34 +0,0 @@ -@Q: - FOR startCity IN germanCity - FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius - LET oneCity = ( - FOR v, e, p IN 1..1 OUTBOUND startCity - GRAPH 'routeplanner' RETURN v._key - ) - RETURN {startCity: startCity._key, connectedCities: oneCity} -@B -{ - "bonn": [ - 7.0998, - 50.734 - ], - "radius": 400000 -} -@R -[ - { - "startCity": "Cologne", - "connectedCities": [ - "Lyon", - "Paris" - ] - }, - { - "startCity": "Hamburg", - "connectedCities": [ - "Cologne", - "Paris", - "Lyon" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/COMBINING_GRAPH_06_cleanup.generated b/Documentation/Examples/COMBINING_GRAPH_06_cleanup.generated deleted file mode 100644 index ee9e73411cd7..000000000000 --- a/Documentation/Examples/COMBINING_GRAPH_06_cleanup.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> examples.dropGraph("routeplanner"); diff --git a/Documentation/Examples/CollectionUnload.generated b/Documentation/Examples/CollectionUnload.generated deleted file mode 100644 index dff484b368ab..000000000000 --- a/Documentation/Examples/CollectionUnload.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 98269, "example" (type document, status loaded)] -arangosh> col.unload(); -arangosh> col; -[ArangoCollection 98269, "example" (type document, status unloaded)] diff --git a/Documentation/Examples/EDGCOL_01_SaveEdgeCol.generated b/Documentation/Examples/EDGCOL_01_SaveEdgeCol.generated deleted file mode 100644 index d895a6870560..000000000000 --- a/Documentation/Examples/EDGCOL_01_SaveEdgeCol.generated +++ /dev/null @@ -1,31 +0,0 @@ -arangosh> db._create("vertex"); -[ArangoCollection 98280, "vertex" 
(type document, status loaded)] -arangosh> db._createEdgeCollection("relation"); -[ArangoCollection 98286, "relation" (type edge, status loaded)] -arangosh> v1 = db.vertex.insert({ name : "vertex 1" }); -{ - "_id" : "vertex/98293", - "_key" : "98293", - "_rev" : "_YOn1FYu--_" -} -arangosh> v2 = db.vertex.insert({ name : "vertex 2" }); -{ - "_id" : "vertex/98297", - "_key" : "98297", - "_rev" : "_YOn1FYu--B" -} -arangosh> e1 = db.relation.insert(v1, v2, { label : "knows" }); -{ - "_id" : "relation/98300", - "_key" : "98300", - "_rev" : "_YOn1FYu--D" -} -arangosh> db._document(e1); -{ - "_key" : "98300", - "_id" : "relation/98300", - "_from" : "vertex/98293", - "_to" : "vertex/98297", - "_rev" : "_YOn1FYu--D", - "label" : "knows" -} diff --git a/Documentation/Examples/EDGCOL_02_Relation.generated b/Documentation/Examples/EDGCOL_02_Relation.generated deleted file mode 100644 index e2ce48e264bc..000000000000 --- a/Documentation/Examples/EDGCOL_02_Relation.generated +++ /dev/null @@ -1,35 +0,0 @@ -arangosh> db._create("vertex"); -[ArangoCollection 98313, "vertex" (type document, status loaded)] -arangosh> db._createEdgeCollection("relation"); -[ArangoCollection 98319, "relation" (type edge, status loaded)] -arangosh> var myGraph = {}; -arangosh> myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); -{ - "_id" : "vertex/98326", - "_key" : "98326", - "_rev" : "_YOn1Fay--B" -} -arangosh> myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); -{ - "_id" : "vertex/98330", - "_key" : "98330", - "_rev" : "_YOn1Fay--D" -} -arangosh> myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, -........> { label : "knows"}); -{ - "_id" : "relation/98333", - "_key" : "98333", - "_rev" : "_YOn1Fa2--_" -} -arangosh> db._document(myGraph.e1); -{ - "_key" : "98333", - "_id" : "relation/98333", - "_from" : "vertex/98326", - "_to" : "vertex/98330", - "_rev" : "_YOn1Fa2--_", - "label" : "knows" -} -arangosh> db.relation.edges(myGraph.e1._id); -[ ] diff --git a/Documentation/Examples/EDGCOL_02_inEdges.generated b/Documentation/Examples/EDGCOL_02_inEdges.generated deleted file mode 100644 index 25136fe5e203..000000000000 --- a/Documentation/Examples/EDGCOL_02_inEdges.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db._create("vertex"); -[ArangoCollection 98347, "vertex" (type document, status loaded)] -arangosh> db._createEdgeCollection("relation"); -[ArangoCollection 98353, "relation" (type edge, status loaded)] -arangosh> myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); -{ - "_id" : "vertex/98360", - "_key" : "98360", - "_rev" : "_YOn1Fc6--_" -} -arangosh> myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); -{ - "_id" : "vertex/98364", - "_key" : "98364", - "_rev" : "_YOn1Fc6--B" -} -arangosh> myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, -........> { label : "knows"}); -{ - "_id" : "relation/98367", - "_key" : "98367", - "_rev" : "_YOn1Fc6--D" -} -arangosh> db._document(myGraph.e1); -{ - "_key" : "98367", - "_id" : "relation/98367", - "_from" : "vertex/98360", - "_to" : "vertex/98364", - "_rev" : "_YOn1Fc6--D", - "label" : "knows" -} -arangosh> db.relation.inEdges(myGraph.v1._id); -[ ] -arangosh> db.relation.inEdges(myGraph.v2._id); -[ - { - "_key" : "98367", - "_id" : "relation/98367", - "_from" : "vertex/98360", - "_to" : "vertex/98364", - "_rev" : "_YOn1Fc6--D", - "label" : "knows" - } -] diff --git a/Documentation/Examples/EDGCOL_02_outEdges.generated b/Documentation/Examples/EDGCOL_02_outEdges.generated deleted file mode 100644 index 930497c6f467..000000000000 --- 
a/Documentation/Examples/EDGCOL_02_outEdges.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db._create("vertex"); -[ArangoCollection 98382, "vertex" (type document, status loaded)] -arangosh> db._createEdgeCollection("relation"); -[ArangoCollection 98388, "relation" (type edge, status loaded)] -arangosh> myGraph.v1 = db.vertex.insert({ name : "vertex 1" }); -{ - "_id" : "vertex/98395", - "_key" : "98395", - "_rev" : "_YOn1Ff---_" -} -arangosh> myGraph.v2 = db.vertex.insert({ name : "vertex 2" }); -{ - "_id" : "vertex/98399", - "_key" : "98399", - "_rev" : "_YOn1Ff---B" -} -arangosh> myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2, -........> { label : "knows"}); -{ - "_id" : "relation/98402", - "_key" : "98402", - "_rev" : "_YOn1Ff---D" -} -arangosh> db._document(myGraph.e1); -{ - "_key" : "98402", - "_id" : "relation/98402", - "_from" : "vertex/98395", - "_to" : "vertex/98399", - "_rev" : "_YOn1Ff---D", - "label" : "knows" -} -arangosh> db.relation.outEdges(myGraph.v1._id); -[ - { - "_key" : "98402", - "_id" : "relation/98402", - "_from" : "vertex/98395", - "_to" : "vertex/98399", - "_rev" : "_YOn1Ff---D", - "label" : "knows" - } -] -arangosh> db.relation.outEdges(myGraph.v2._id); -[ ] diff --git a/Documentation/Examples/GRAPHKSP_01_create_graph.generated b/Documentation/Examples/GRAPHKSP_01_create_graph.generated deleted file mode 100644 index d15890c72a04..000000000000 --- a/Documentation/Examples/GRAPHKSP_01_create_graph.generated +++ /dev/null @@ -1,404 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("kShortestPathsGraph"); -arangosh> db.places.toArray(); -[ - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "Cologne", - "_id" : "places/Cologne", - "_rev" : "_Yi4-H6u--_", - "label" : "Cologne" - }, - { - "_key" : "Birmingham", - "_id" : "places/Birmingham", - "_rev" : "_Yi4-H6m--_", - "label" : "Birmingham" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - }, - { - "_key" : "StAndrews", - "_id" : "places/StAndrews", - "_rev" : "_Yi4-H6W--_", - "label" : "StAndrews" - }, - { - "_key" : "Jasper", - "_id" : "places/Jasper", - "_rev" : "_Yi4-H7---_", - "label" : "Jasper" - }, - { - "_key" : "Saskatoon", - "_id" : "places/Saskatoon", - "_rev" : "_Yi4-H62--_", - "label" : "Saskatoon" - }, - { - "_key" : "Winnipeg", - "_id" : "places/Winnipeg", - "_rev" : "_Yi4-H6y--B", - "label" : "Winnipeg" - }, - { - "_key" : "Toronto", - "_id" : "places/Toronto", - "_rev" : "_Yi4-H6y--_", - "label" : "Toronto" - }, - { - "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - { - "_key" : "Glasgow", - "_id" : "places/Glasgow", - "_rev" : "_Yi4-H6a--_", - "label" : "Glasgow" - }, - { - "_key" : "Vancouver", - "_id" : "places/Vancouver", - "_rev" : "_Yi4-H7---B", - "label" : "Vancouver" - }, - { - "_key" : "Inverness", - "_id" : "places/Inverness", - "_rev" : "_Yi4-H6K--_", - "label" : "Inverness" - }, - { - "_key" : "Edmonton", - "_id" : "places/Edmonton", - "_rev" : "_Yi4-H66--_", - "label" : "Edmonton" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "Brussels", - "_id" : "places/Brussels", - "_rev" : "_Yi4-H6q--_", - "label" : "Brussels" - }, - { - "_key" : "Carlisle", - "_id" : "places/Carlisle", - "_rev" : "_Yi4-H6i--_", - "label" : "Carlisle" - }, - { - "_key" 
: "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - } -] -arangosh> db.connections.toArray(); -[ - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98612", - "_id" : "connections/98612", - "_from" : "places/Edmonton", - "_to" : "places/Jasper", - "_rev" : "_Yi4-H8q--_", - "travelTime" : 6 - }, - { - "_key" : "98585", - "_id" : "connections/98585", - "_from" : "places/Brussels", - "_to" : "places/London", - "_rev" : "_Yi4-H8K--B", - "travelTime" : 0.4 - }, - { - "_key" : "98600", - "_id" : "connections/98600", - "_from" : "places/Winnipeg", - "_to" : "places/Saskatoon", - "_rev" : "_Yi4-H8a--B", - "travelTime" : 12 - }, - { - "_key" : "98591", - "_id" : "connections/98591", - "_from" : "places/Cologne", - "_to" : "places/Brussels", - "_rev" : "_Yi4-H8S--_", - "travelTime" : 0.5 - }, - { - "_key" : "98606", - "_id" : "connections/98606", - "_from" : "places/Saskatoon", - "_to" : "places/Edmonton", - "_rev" : "_Yi4-H8i--_", - "travelTime" : 12 - }, - { - "_key" : "98549", - "_id" : "connections/98549", - "_from" : "places/Carlisle", - "_to" : "places/Glasgow", - "_rev" : "_Yi4-H7m--_", - "travelTime" : 1 - }, - { - "_key" : "98573", - "_id" : "connections/98573", - "_from" : "places/StAndrews", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H8---B", - "travelTime" : 5 - }, - { - "_key" : "98534", - "_id" : "connections/98534", - "_from" : "places/Edinburgh", - "_to" : "places/Glasgow", - "_rev" : "_Yi4-H7W--_", - "travelTime" : 1 - }, - { - "_key" : "98588", - "_id" : "connections/98588", - "_from" : "places/Brussels", - "_to" : "places/Cologne", - "_rev" : "_Yi4-H8O--_", - "travelTime" : 2 - }, - { - "_key" : "98531", - "_id" : "connections/98531", - "_from" : "places/Edinburgh", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7S--B", - "travelTime" : 0.6666666666666666 - }, - { - "_key" : "98558", - "_id" : "connections/98558", - "_from" : "places/Carlisle", - "_to" : "places/Birmingham", - "_rev" : "_Yi4-H7y--_", - "travelTime" : 2 - }, - { - "_key" : "98543", - "_id" : "connections/98543", - "_from" : "places/York", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7i--_", - "travelTime" : 0.2857142857142857 - }, - { - "_key" : "98570", - "_id" : "connections/98570", - "_from" : "places/Leuchars", - "_to" : "places/StAndrews", - "_rev" : "_Yi4-H8---_", - "travelTime" : 0.2 - }, - { - "_key" : "98582", - "_id" : "connections/98582", - "_from" : "places/London", - "_to" : "places/Brussels", - "_rev" : "_Yi4-H8K--_", - "travelTime" : 2.5 - }, - { - "_key" : "98597", - "_id" : "connections/98597", - "_from" : "places/Winnipeg", - "_to" : "places/Toronto", - "_rev" : "_Yi4-H8a--_", - "travelTime" : 0.027777777777777776 - }, - { - "_key" : "98603", - "_id" : "connections/98603", - "_from" : "places/Saskatoon", - "_to" : "places/Winnipeg", - "_rev" : "_Yi4-H8e--_", - "travelTime" : 0.08333333333333333 - }, - { - "_key" : "98555", - "_id" : "connections/98555", - "_from" : "places/York", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7u--_", - "travelTime" : 0.4 - }, - { - "_key" : "98621", - "_id" : "connections/98621", - "_from" : "places/Vancouver", - "_to" : "places/Jasper", - "_rev" : "_Yi4-H8y--_", - "travelTime" : 0.08333333333333333 - }, - { - "_key" : "98609", - "_id" : "connections/98609", - "_from" : "places/Edmonton", - "_to" : "places/Saskatoon", - "_rev" : "_Yi4-H8m--_", - "travelTime" : 0.08333333333333333 - }, - 
{ - "_key" : "98615", - "_id" : "connections/98615", - "_from" : "places/Jasper", - "_to" : "places/Edmonton", - "_rev" : "_Yi4-H8u--_", - "travelTime" : 0.16666666666666666 - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - }, - { - "_key" : "98552", - "_id" : "connections/98552", - "_from" : "places/Carlisle", - "_to" : "places/York", - "_rev" : "_Yi4-H7q--_", - "travelTime" : 2.5 - }, - { - "_key" : "98567", - "_id" : "connections/98567", - "_from" : "places/London", - "_to" : "places/Birmingham", - "_rev" : "_Yi4-H76--_", - "travelTime" : 0.6666666666666666 - }, - { - "_key" : "98537", - "_id" : "connections/98537", - "_from" : "places/Glasgow", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7a--_", - "travelTime" : 1 - }, - { - "_key" : "98618", - "_id" : "connections/98618", - "_from" : "places/Jasper", - "_to" : "places/Vancouver", - "_rev" : "_Yi4-H8u--B", - "travelTime" : 12 - }, - { - "_key" : "98579", - "_id" : "connections/98579", - "_from" : "places/London", - "_to" : "places/York", - "_rev" : "_Yi4-H8G--_", - "travelTime" : 0.5555555555555556 - }, - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - "travelTime" : 1.5 - }, - { - "_key" : "98515", - "_id" : "connections/98515", - "_from" : "places/Inverness", - "_to" : "places/Aberdeen", - "_rev" : "_Yi4-H7C--_", - "travelTime" : 3 - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - }, - { - "_key" : "98519", - "_id" : "connections/98519", - "_from" : "places/Aberdeen", - "_to" : "places/Inverness", - "_rev" : "_Yi4-H7G--_", - "travelTime" : 0.3333333333333333 - }, - { - "_key" : "98564", - "_id" : "connections/98564", - "_from" : "places/Birmingham", - "_to" : "places/London", - "_rev" : "_Yi4-H72--_", - "travelTime" : 1.5 - }, - { - "_key" : "98546", - "_id" : "connections/98546", - "_from" : "places/Glasgow", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7i--B", - "travelTime" : 1 - }, - { - "_key" : "98561", - "_id" : "connections/98561", - "_from" : "places/Birmingham", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7y--B", - "travelTime" : 0.5 - }, - { - "_key" : "98525", - "_id" : "connections/98525", - "_from" : "places/Leuchars", - "_to" : "places/Aberdeen", - "_rev" : "_Yi4-H7O--_", - "travelTime" : 0.6666666666666666 - }, - { - "_key" : "98594", - "_id" : "connections/98594", - "_from" : "places/Toronto", - "_to" : "places/Winnipeg", - "_rev" : "_Yi4-H8W--_", - "travelTime" : 36 - } -] diff --git a/Documentation/Examples/GRAPHKSP_02_Aberdeen_to_London.generated b/Documentation/Examples/GRAPHKSP_02_Aberdeen_to_London.generated deleted file mode 100644 index 804e58b40119..000000000000 --- a/Documentation/Examples/GRAPHKSP_02_Aberdeen_to_London.generated +++ /dev/null @@ -1,150 +0,0 @@ -arangosh> db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' RETURN [v,e]"); -[ - [ - { - "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - null - ], - [ - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - 
"travelTime" : 1.5 - } - ], - [ - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - } - ], - [ - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - } - ], - [ - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - } - ] -] -[object ArangoQueryCursor, count: 5, cached: false, hasMore: false] -arangosh> db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' LIMIT 1 RETURN p"); -[ - { - "edges" : [ - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - "travelTime" : 1.5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - } - ], - "vertices" : [ - { - "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - } - ], - "weight" : 4 - } -] -[object ArangoQueryCursor, count: 1, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHKSP_03_Aberdeen_to_London.generated b/Documentation/Examples/GRAPHKSP_03_Aberdeen_to_London.generated deleted file mode 100644 index 0924f454dd5f..000000000000 --- a/Documentation/Examples/GRAPHKSP_03_Aberdeen_to_London.generated +++ /dev/null @@ -1,267 +0,0 @@ -arangosh> db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' LIMIT 3 RETURN p"); -[ - { - "edges" : [ - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - "travelTime" : 1.5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - } - ], - "vertices" : [ - { 
- "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - } - ], - "weight" : 4 - }, - { - "edges" : [ - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - "travelTime" : 1.5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - }, - { - "_key" : "98555", - "_id" : "connections/98555", - "_from" : "places/York", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7u--_", - "travelTime" : 0.4 - }, - { - "_key" : "98558", - "_id" : "connections/98558", - "_from" : "places/Carlisle", - "_to" : "places/Birmingham", - "_rev" : "_Yi4-H7y--_", - "travelTime" : 2 - }, - { - "_key" : "98564", - "_id" : "connections/98564", - "_from" : "places/Birmingham", - "_to" : "places/London", - "_rev" : "_Yi4-H72--_", - "travelTime" : 1.5 - } - ], - "vertices" : [ - { - "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "Carlisle", - "_id" : "places/Carlisle", - "_rev" : "_Yi4-H6i--_", - "label" : "Carlisle" - }, - { - "_key" : "Birmingham", - "_id" : "places/Birmingham", - "_rev" : "_Yi4-H6m--_", - "label" : "Birmingham" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - } - ], - "weight" : 6 - }, - { - "edges" : [ - { - "_key" : "98522", - "_id" : "connections/98522", - "_from" : "places/Aberdeen", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H7K--_", - "travelTime" : 1.5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98534", - "_id" : "connections/98534", - "_from" : "places/Edinburgh", - "_to" : "places/Glasgow", - "_rev" : "_Yi4-H7W--_", - "travelTime" : 1 - }, - { - "_key" : "98546", - "_id" : "connections/98546", - "_from" : "places/Glasgow", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7i--B", - "travelTime" : 1 - }, - { - "_key" : "98558", - "_id" : "connections/98558", - "_from" : "places/Carlisle", - "_to" : "places/Birmingham", - "_rev" : "_Yi4-H7y--_", - "travelTime" : 2 - }, - { - "_key" : "98564", - "_id" : "connections/98564", - "_from" : "places/Birmingham", - "_to" : "places/London", - "_rev" : "_Yi4-H72--_", - "travelTime" : 1.5 - } - ], - "vertices" : [ - { - "_key" : "Aberdeen", - "_id" : "places/Aberdeen", - "_rev" : "_Yi4-H6O--_", - "label" : "Aberdeen" - }, - { - "_key" : "Leuchars", - 
"_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "Glasgow", - "_id" : "places/Glasgow", - "_rev" : "_Yi4-H6a--_", - "label" : "Glasgow" - }, - { - "_key" : "Carlisle", - "_id" : "places/Carlisle", - "_rev" : "_Yi4-H6i--_", - "label" : "Carlisle" - }, - { - "_key" : "Birmingham", - "_id" : "places/Birmingham", - "_rev" : "_Yi4-H6m--_", - "label" : "Birmingham" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - } - ], - "weight" : 6 - } -] -[object ArangoQueryCursor, count: 3, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHKSP_04_Aberdeen_to_Toronto.generated b/Documentation/Examples/GRAPHKSP_04_Aberdeen_to_Toronto.generated deleted file mode 100644 index 1348b711b7c3..000000000000 --- a/Documentation/Examples/GRAPHKSP_04_Aberdeen_to_Toronto.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/Toronto' GRAPH 'shortestPathsGraph' LIMIT 3 RETURN p"); -[object ArangoQueryCursor, count: 0, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHKSP_05_StAndrews_to_Cologne.generated b/Documentation/Examples/GRAPHKSP_05_StAndrews_to_Cologne.generated deleted file mode 100644 index a4b17b1b4ede..000000000000 --- a/Documentation/Examples/GRAPHKSP_05_StAndrews_to_Cologne.generated +++ /dev/null @@ -1,351 +0,0 @@ -arangosh> db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/StAndrews' TO 'places/Cologne' GRAPH 'shortestPathsGraph' OPTIONS { 'weightAttribute': 'travelTime', defaultWeight: '15'} LIMIT 3 RETURN p"); -[ - { - "edges" : [ - { - "_key" : "98573", - "_id" : "connections/98573", - "_from" : "places/StAndrews", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H8---B", - "travelTime" : 5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98540", - "_id" : "connections/98540", - "_from" : "places/Edinburgh", - "_to" : "places/York", - "_rev" : "_Yi4-H7e--_", - "travelTime" : 3.5 - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - }, - { - "_key" : "98582", - "_id" : "connections/98582", - "_from" : "places/London", - "_to" : "places/Brussels", - "_rev" : "_Yi4-H8K--_", - "travelTime" : 2.5 - }, - { - "_key" : "98588", - "_id" : "connections/98588", - "_from" : "places/Brussels", - "_to" : "places/Cologne", - "_rev" : "_Yi4-H8O--_", - "travelTime" : 2 - } - ], - "vertices" : [ - { - "_key" : "StAndrews", - "_id" : "places/StAndrews", - "_rev" : "_Yi4-H6W--_", - "label" : "StAndrews" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - }, - { - "_key" : "Brussels", - "_id" : "places/Brussels", - "_rev" : "_Yi4-H6q--_", - "label" : "Brussels" - }, - { - "_key" : "Cologne", - "_id" : "places/Cologne", - "_rev" : "_Yi4-H6u--_", - "label" : "Cologne" - } - ], - 
"weight" : 16.3 - }, - { - "edges" : [ - { - "_key" : "98573", - "_id" : "connections/98573", - "_from" : "places/StAndrews", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H8---B", - "travelTime" : 5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98534", - "_id" : "connections/98534", - "_from" : "places/Edinburgh", - "_to" : "places/Glasgow", - "_rev" : "_Yi4-H7W--_", - "travelTime" : 1 - }, - { - "_key" : "98546", - "_id" : "connections/98546", - "_from" : "places/Glasgow", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7i--B", - "travelTime" : 1 - }, - { - "_key" : "98558", - "_id" : "connections/98558", - "_from" : "places/Carlisle", - "_to" : "places/Birmingham", - "_rev" : "_Yi4-H7y--_", - "travelTime" : 2 - }, - { - "_key" : "98564", - "_id" : "connections/98564", - "_from" : "places/Birmingham", - "_to" : "places/London", - "_rev" : "_Yi4-H72--_", - "travelTime" : 1.5 - }, - { - "_key" : "98582", - "_id" : "connections/98582", - "_from" : "places/London", - "_to" : "places/Brussels", - "_rev" : "_Yi4-H8K--_", - "travelTime" : 2.5 - }, - { - "_key" : "98588", - "_id" : "connections/98588", - "_from" : "places/Brussels", - "_to" : "places/Cologne", - "_rev" : "_Yi4-H8O--_", - "travelTime" : 2 - } - ], - "vertices" : [ - { - "_key" : "StAndrews", - "_id" : "places/StAndrews", - "_rev" : "_Yi4-H6W--_", - "label" : "StAndrews" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "Glasgow", - "_id" : "places/Glasgow", - "_rev" : "_Yi4-H6a--_", - "label" : "Glasgow" - }, - { - "_key" : "Carlisle", - "_id" : "places/Carlisle", - "_rev" : "_Yi4-H6i--_", - "label" : "Carlisle" - }, - { - "_key" : "Birmingham", - "_id" : "places/Birmingham", - "_rev" : "_Yi4-H6m--_", - "label" : "Birmingham" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - }, - { - "_key" : "Brussels", - "_id" : "places/Brussels", - "_rev" : "_Yi4-H6q--_", - "label" : "Brussels" - }, - { - "_key" : "Cologne", - "_id" : "places/Cologne", - "_rev" : "_Yi4-H6u--_", - "label" : "Cologne" - } - ], - "weight" : 16.5 - }, - { - "edges" : [ - { - "_key" : "98573", - "_id" : "connections/98573", - "_from" : "places/StAndrews", - "_to" : "places/Leuchars", - "_rev" : "_Yi4-H8---B", - "travelTime" : 5 - }, - { - "_key" : "98528", - "_id" : "connections/98528", - "_from" : "places/Leuchars", - "_to" : "places/Edinburgh", - "_rev" : "_Yi4-H7S--_", - "travelTime" : 1.5 - }, - { - "_key" : "98534", - "_id" : "connections/98534", - "_from" : "places/Edinburgh", - "_to" : "places/Glasgow", - "_rev" : "_Yi4-H7W--_", - "travelTime" : 1 - }, - { - "_key" : "98546", - "_id" : "connections/98546", - "_from" : "places/Glasgow", - "_to" : "places/Carlisle", - "_rev" : "_Yi4-H7i--B", - "travelTime" : 1 - }, - { - "_key" : "98552", - "_id" : "connections/98552", - "_from" : "places/Carlisle", - "_to" : "places/York", - "_rev" : "_Yi4-H7q--_", - "travelTime" : 2.5 - }, - { - "_key" : "98576", - "_id" : "connections/98576", - "_from" : "places/York", - "_to" : "places/London", - "_rev" : "_Yi4-H8C--_", - "travelTime" : 1.8 - }, - { - "_key" : "98582", - "_id" : "connections/98582", - "_from" : "places/London", - "_to" : "places/Brussels", - "_rev" : "_Yi4-H8K--_", - 
"travelTime" : 2.5 - }, - { - "_key" : "98588", - "_id" : "connections/98588", - "_from" : "places/Brussels", - "_to" : "places/Cologne", - "_rev" : "_Yi4-H8O--_", - "travelTime" : 2 - } - ], - "vertices" : [ - { - "_key" : "StAndrews", - "_id" : "places/StAndrews", - "_rev" : "_Yi4-H6W--_", - "label" : "StAndrews" - }, - { - "_key" : "Leuchars", - "_id" : "places/Leuchars", - "_rev" : "_Yi4-H6S--_", - "label" : "Leuchars" - }, - { - "_key" : "Edinburgh", - "_id" : "places/Edinburgh", - "_rev" : "_Yi4-H6W--B", - "label" : "Edinburgh" - }, - { - "_key" : "Glasgow", - "_id" : "places/Glasgow", - "_rev" : "_Yi4-H6a--_", - "label" : "Glasgow" - }, - { - "_key" : "Carlisle", - "_id" : "places/Carlisle", - "_rev" : "_Yi4-H6i--_", - "label" : "Carlisle" - }, - { - "_key" : "York", - "_id" : "places/York", - "_rev" : "_Yi4-H6e--_", - "label" : "York" - }, - { - "_key" : "London", - "_id" : "places/London", - "_rev" : "_Yi4-H6m--B", - "label" : "London" - }, - { - "_key" : "Brussels", - "_id" : "places/Brussels", - "_rev" : "_Yi4-H6q--_", - "label" : "Brussels" - }, - { - "_key" : "Cologne", - "_id" : "places/Cologne", - "_rev" : "_Yi4-H6u--_", - "label" : "Cologne" - } - ], - "weight" : 17.3 - } -] -[object ArangoQueryCursor, count: 3, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHKSP_99_drop_graph.generated b/Documentation/Examples/GRAPHKSP_99_drop_graph.generated deleted file mode 100644 index b07c3b2d2cce..000000000000 --- a/Documentation/Examples/GRAPHKSP_99_drop_graph.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> examples.dropGraph("kShortestPathsGraph"); diff --git a/Documentation/Examples/GRAPHSP_01_create_graph.generated b/Documentation/Examples/GRAPHSP_01_create_graph.generated deleted file mode 100644 index b59b74bc10f3..000000000000 --- a/Documentation/Examples/GRAPHSP_01_create_graph.generated +++ /dev/null @@ -1,174 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("traversalGraph"); -arangosh> db.circles.toArray(); -[ - { - "_key" : "I", - "_id" : "circles/I", - "_rev" : "_YOn1FhK--L", - "label" : "9" - }, - { - "_key" : "G", - "_id" : "circles/G", - "_rev" : "_YOn1FhK--H", - "label" : "7" - }, - { - "_key" : "F", - "_id" : "circles/F", - "_rev" : "_YOn1FhK--F", - "label" : "6" - }, - { - "_key" : "A", - "_id" : "circles/A", - "_rev" : "_YOn1FhG--_", - "label" : "1" - }, - { - "_key" : "E", - "_id" : "circles/E", - "_rev" : "_YOn1FhK--D", - "label" : "5" - }, - { - "_key" : "C", - "_id" : "circles/C", - "_rev" : "_YOn1FhK--_", - "label" : "3" - }, - { - "_key" : "D", - "_id" : "circles/D", - "_rev" : "_YOn1FhK--B", - "label" : "4" - }, - { - "_key" : "J", - "_id" : "circles/J", - "_rev" : "_YOn1FhO--_", - "label" : "10" - }, - { - "_key" : "B", - "_id" : "circles/B", - "_rev" : "_YOn1FhG--B", - "label" : "2" - }, - { - "_key" : "H", - "_id" : "circles/H", - "_rev" : "_YOn1FhK--J", - "label" : "8" - }, - { - "_key" : "K", - "_id" : "circles/K", - "_rev" : "_YOn1FhO--B", - "label" : "11" - } -] -arangosh> db.edges.toArray(); -[ - { - "_key" : "98474", - "_id" : "edges/98474", - "_from" : "circles/A", - "_to" : "circles/G", - "_rev" : "_YOn1FhS--A", - "theFalse" : false, - "theTruth" : true, - "label" : "right_foo" - }, - { - "_key" : "98483", - "_id" : "edges/98483", - "_from" : "circles/G", - "_to" : "circles/J", - "_rev" : "_YOn1FhS--G", - "theFalse" : false, - "theTruth" : true, - "label" : 
"right_zip" - }, - { - "_key" : "98458", - "_id" : "edges/98458", - "_from" : "circles/A", - "_to" : "circles/B", - "_rev" : "_YOn1FhO--D", - "theFalse" : false, - "theTruth" : true, - "label" : "left_bar" - }, - { - "_key" : "98468", - "_id" : "edges/98468", - "_from" : "circles/B", - "_to" : "circles/E", - "_rev" : "_YOn1FhO--J", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blub" - }, - { - "_key" : "98477", - "_id" : "edges/98477", - "_from" : "circles/G", - "_to" : "circles/H", - "_rev" : "_YOn1FhS--C", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blob" - }, - { - "_key" : "98486", - "_id" : "edges/98486", - "_from" : "circles/J", - "_to" : "circles/K", - "_rev" : "_YOn1FhS--I", - "theFalse" : false, - "theTruth" : true, - "label" : "right_zup" - }, - { - "_key" : "98471", - "_id" : "edges/98471", - "_from" : "circles/E", - "_to" : "circles/F", - "_rev" : "_YOn1FhS---", - "theFalse" : false, - "theTruth" : true, - "label" : "left_schubi" - }, - { - "_key" : "98462", - "_id" : "edges/98462", - "_from" : "circles/B", - "_to" : "circles/C", - "_rev" : "_YOn1FhO--F", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blarg" - }, - { - "_key" : "98465", - "_id" : "edges/98465", - "_from" : "circles/C", - "_to" : "circles/D", - "_rev" : "_YOn1FhO--H", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blorg" - }, - { - "_key" : "98480", - "_id" : "edges/98480", - "_from" : "circles/H", - "_to" : "circles/I", - "_rev" : "_YOn1FhS--E", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blub" - } -] diff --git a/Documentation/Examples/GRAPHSP_02_A_to_D.generated b/Documentation/Examples/GRAPHSP_02_A_to_D.generated deleted file mode 100644 index 0c651f215708..000000000000 --- a/Documentation/Examples/GRAPHSP_02_A_to_D.generated +++ /dev/null @@ -1,40 +0,0 @@ -arangosh> db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' GRAPH 'traversalGraph' RETURN [v._key, e._key]"); -[ - [ - "A", - null - ], - [ - "B", - "98458" - ], - [ - "C", - "98462" - ], - [ - "D", - "98465" - ] -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] -arangosh> db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' edges RETURN [v._key, e._key]"); -[ - [ - "A", - null - ], - [ - "B", - "98458" - ], - [ - "C", - "98462" - ], - [ - "D", - "98465" - ] -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHSP_03_A_to_D.generated b/Documentation/Examples/GRAPHSP_03_A_to_D.generated deleted file mode 100644 index 2a7b8e8e7038..000000000000 --- a/Documentation/Examples/GRAPHSP_03_A_to_D.generated +++ /dev/null @@ -1,40 +0,0 @@ -arangosh> db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d GRAPH 'traversalGraph' RETURN [v._key, e._key]"); -[ - [ - "A", - null - ], - [ - "B", - "98458" - ], - [ - "C", - "98462" - ], - [ - "D", - "98465" - ] -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] -arangosh> db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d edges RETURN [v._key, e._key]"); -[ - [ - "A", - null - ], - [ - "B", - "98458" - ], - [ - "C", - "98462" - ], - [ - "D", - "98465" - ] -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHSP_99_drop_graph.generated b/Documentation/Examples/GRAPHSP_99_drop_graph.generated 
deleted file mode 100644 index fd7a21f419cb..000000000000 --- a/Documentation/Examples/GRAPHSP_99_drop_graph.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> examples.dropGraph("traversalGraph"); diff --git a/Documentation/Examples/GRAPHTRAV_01_create_graph.generated b/Documentation/Examples/GRAPHTRAV_01_create_graph.generated deleted file mode 100644 index 33524106d1dd..000000000000 --- a/Documentation/Examples/GRAPHTRAV_01_create_graph.generated +++ /dev/null @@ -1,177 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("traversalGraph"); -arangosh> db.circles.toArray(); -[ - { - "_key" : "I", - "_id" : "circles/I", - "_rev" : "_YT2FO_e--_", - "label" : "9" - }, - { - "_key" : "G", - "_id" : "circles/G", - "_rev" : "_YT2FO_a--B", - "label" : "7" - }, - { - "_key" : "F", - "_id" : "circles/F", - "_rev" : "_YT2FO_a--_", - "label" : "6" - }, - { - "_key" : "A", - "_id" : "circles/A", - "_rev" : "_YT2FO_S--_", - "label" : "1" - }, - { - "_key" : "E", - "_id" : "circles/E", - "_rev" : "_YT2FO_W--B", - "label" : "5" - }, - { - "_key" : "C", - "_id" : "circles/C", - "_rev" : "_YT2FO_S--D", - "label" : "3" - }, - { - "_key" : "D", - "_id" : "circles/D", - "_rev" : "_YT2FO_W--_", - "label" : "4" - }, - { - "_key" : "J", - "_id" : "circles/J", - "_rev" : "_YT2FO_e--B", - "label" : "10" - }, - { - "_key" : "B", - "_id" : "circles/B", - "_rev" : "_YT2FO_S--B", - "label" : "2" - }, - { - "_key" : "H", - "_id" : "circles/H", - "_rev" : "_YT2FO_a--D", - "label" : "8" - }, - { - "_key" : "K", - "_id" : "circles/K", - "_rev" : "_YT2FO_e--D", - "label" : "11" - } -] -arangosh> db.edges.toArray(); -[ - { - "_key" : "98575", - "_id" : "edges/98575", - "_from" : "circles/B", - "_to" : "circles/C", - "_rev" : "_YT2FO_i--B", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blarg" - }, - { - "_key" : "98581", - "_id" : "edges/98581", - "_from" : "circles/B", - "_to" : "circles/E", - "_rev" : "_YT2FO_m--B", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blub" - }, - { - "_key" : "98584", - "_id" : "edges/98584", - "_from" : "circles/E", - "_to" : "circles/F", - "_rev" : "_YT2FO_m--D", - "theFalse" : false, - "theTruth" : true, - "label" : "left_schubi" - }, - { - "_key" : "98599", - "_id" : "edges/98599", - "_from" : "circles/J", - "_to" : "circles/K", - "_rev" : "_YT2FO_u--B", - "theFalse" : false, - "theTruth" : true, - "label" : "right_zup" - }, - { - "_key" : "98596", - "_id" : "edges/98596", - "_from" : "circles/G", - "_to" : "circles/J", - "_rev" : "_YT2FO_u--_", - "theFalse" : false, - "theTruth" : true, - "label" : "right_zip" - }, - { - "_key" : "98593", - "_id" : "edges/98593", - "_from" : "circles/H", - "_to" : "circles/I", - "_rev" : "_YT2FO_q--D", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blub" - }, - { - "_key" : "98587", - "_id" : "edges/98587", - "_from" : "circles/A", - "_to" : "circles/G", - "_rev" : "_YT2FO_q--_", - "theFalse" : false, - "theTruth" : true, - "label" : "right_foo" - }, - { - "_key" : "98571", - "_id" : "edges/98571", - "_from" : "circles/A", - "_to" : "circles/B", - "_rev" : "_YT2FO_i--_", - "theFalse" : false, - "theTruth" : true, - "label" : "left_bar" - }, - { - "_key" : "98578", - "_id" : "edges/98578", - "_from" : "circles/C", - "_to" : "circles/D", - "_rev" : "_YT2FO_m--_", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blorg" - }, - { - "_key" : "98590", 
- "_id" : "edges/98590", - "_from" : "circles/G", - "_to" : "circles/H", - "_rev" : "_YT2FO_q--B", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blob" - } -] -arangosh> print("once you don't need them anymore, clean them up:"); -once you don't need them anymore, clean them up: -arangosh> examples.dropGraph("traversalGraph"); diff --git a/Documentation/Examples/GRAPHTRAV_02_traverse_all.generated b/Documentation/Examples/GRAPHTRAV_02_traverse_all.generated deleted file mode 100644 index 90880f83b2ee..000000000000 --- a/Documentation/Examples/GRAPHTRAV_02_traverse_all.generated +++ /dev/null @@ -1,28 +0,0 @@ -arangosh> db._query("FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' RETURN v._key"); -[ - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K" -] -[object ArangoQueryCursor, count: 10, cached: false, hasMore: false] -arangosh> db._query("FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key"); -[ - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K" -] -[object ArangoQueryCursor, count: 10, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHTRAV_02_traverse_all_a.generated b/Documentation/Examples/GRAPHTRAV_02_traverse_all_a.generated deleted file mode 100644 index 9f0a5190e7f3..000000000000 --- a/Documentation/Examples/GRAPHTRAV_02_traverse_all_a.generated +++ /dev/null @@ -1,17 +0,0 @@ -@Q: - FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_02_traverse_all_b.generated b/Documentation/Examples/GRAPHTRAV_02_traverse_all_b.generated deleted file mode 100644 index 76a35979fb70..000000000000 --- a/Documentation/Examples/GRAPHTRAV_02_traverse_all_b.generated +++ /dev/null @@ -1,16 +0,0 @@ -@Q: - FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key - -@R -[ - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_03_traverse_3.generated b/Documentation/Examples/GRAPHTRAV_03_traverse_3.generated deleted file mode 100644 index 4721f059ce5e..000000000000 --- a/Documentation/Examples/GRAPHTRAV_03_traverse_3.generated +++ /dev/null @@ -1,16 +0,0 @@ -arangosh> db._query("FOR v IN 2..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' return v._key"); -[ - "C", - "E", - "H", - "J" -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] -arangosh> db._query("FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' return v._key"); -[ - "C", - "E", - "H", - "J" -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHTRAV_03_traverse_3a.generated b/Documentation/Examples/GRAPHTRAV_03_traverse_3a.generated deleted file mode 100644 index 24de850fabbf..000000000000 --- a/Documentation/Examples/GRAPHTRAV_03_traverse_3a.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: - FOR v IN 2..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "C", - "E", - "H", - "J" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_03_traverse_3b.generated b/Documentation/Examples/GRAPHTRAV_03_traverse_3b.generated deleted file mode 100644 index 36f4b1d1ff58..000000000000 --- a/Documentation/Examples/GRAPHTRAV_03_traverse_3b.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: - FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "C", - "E", - "H", - "J" -] \ 
No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_04_traverse_4.generated b/Documentation/Examples/GRAPHTRAV_04_traverse_4.generated deleted file mode 100644 index d9cdf707000c..000000000000 --- a/Documentation/Examples/GRAPHTRAV_04_traverse_4.generated +++ /dev/null @@ -1,18 +0,0 @@ -arangosh> db._query("FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' RETURN v._key"); -[ - "B", - "C", - "D", - "E", - "F" -] -[object ArangoQueryCursor, count: 5, cached: false, hasMore: false] -arangosh> db._query("FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.edges[0].label != 'right_foo' RETURN v._key"); -[ - "B", - "C", - "D", - "E", - "F" -] -[object ArangoQueryCursor, count: 5, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHTRAV_04_traverse_4a.generated b/Documentation/Examples/GRAPHTRAV_04_traverse_4a.generated deleted file mode 100644 index 9362b4c3f04f..000000000000 --- a/Documentation/Examples/GRAPHTRAV_04_traverse_4a.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: - FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' - RETURN v._key - -@R -[ - "B", - "C", - "D", - "E", - "F" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_04_traverse_4b.generated b/Documentation/Examples/GRAPHTRAV_04_traverse_4b.generated deleted file mode 100644 index c01b12a6c91e..000000000000 --- a/Documentation/Examples/GRAPHTRAV_04_traverse_4b.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: - FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].label != 'right_foo' - RETURN v._key - -@R -[ - "B", - "C", - "D", - "E", - "F" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_05_traverse_5.generated b/Documentation/Examples/GRAPHTRAV_05_traverse_5.generated deleted file mode 100644 index 8a20ebb02566..000000000000 --- a/Documentation/Examples/GRAPHTRAV_05_traverse_5.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> db._query("FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' FILTER p.edges[1].label != 'left_blub' return v._key"); -[ - "B", - "C", - "D" -] -[object ArangoQueryCursor, count: 3, cached: false, hasMore: false] -arangosh> db._query("FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' AND p.edges[1].label != 'left_blub' return v._key"); -[ - "B", - "C", - "D" -] -[object ArangoQueryCursor, count: 3, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHTRAV_05_traverse_5a.generated b/Documentation/Examples/GRAPHTRAV_05_traverse_5a.generated deleted file mode 100644 index 7d7fb643aa0f..000000000000 --- a/Documentation/Examples/GRAPHTRAV_05_traverse_5a.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' - FILTER p.edges[1].label != 'left_blub' - RETURN v._key - -@R -[ - "B", - "C", - "D" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_05_traverse_5b.generated b/Documentation/Examples/GRAPHTRAV_05_traverse_5b.generated deleted file mode 100644 index 48b88cf2475f..000000000000 --- a/Documentation/Examples/GRAPHTRAV_05_traverse_5b.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key != 'G' AND p.edges[1].label != 'left_blub' - RETURN v._key - -@R -[ - "B", - "C", - 
"D" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_06_traverse_6a.generated b/Documentation/Examples/GRAPHTRAV_06_traverse_6a.generated deleted file mode 100644 index ef0ba8acc797..000000000000 --- a/Documentation/Examples/GRAPHTRAV_06_traverse_6a.generated +++ /dev/null @@ -1,8 +0,0 @@ -@Q: - FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "F" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_06_traverse_6b.generated b/Documentation/Examples/GRAPHTRAV_06_traverse_6b.generated deleted file mode 100644 index e2adc257964a..000000000000 --- a/Documentation/Examples/GRAPHTRAV_06_traverse_6b.generated +++ /dev/null @@ -1,9 +0,0 @@ -@Q: - FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "B", - "A" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_06_traverse_6c.generated b/Documentation/Examples/GRAPHTRAV_06_traverse_6c.generated deleted file mode 100644 index fe3cbbb999a2..000000000000 --- a/Documentation/Examples/GRAPHTRAV_06_traverse_6c.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: - FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph' - RETURN v._key - -@R -[ - "F", - "B", - "C", - "D", - "A", - "G" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_06_traverse_reverse_6.generated b/Documentation/Examples/GRAPHTRAV_06_traverse_reverse_6.generated deleted file mode 100644 index 122eb9a80191..000000000000 --- a/Documentation/Examples/GRAPHTRAV_06_traverse_reverse_6.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> db._query("FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"); -[ - "F" -] -[object ArangoQueryCursor, count: 1, cached: false, hasMore: false] -arangosh> db._query("FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"); -[ - "B", - "A" -] -[object ArangoQueryCursor, count: 2, cached: false, hasMore: false] -arangosh> db._query("FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph' return v._key"); -[ - "F", - "B", - "C", - "D", - "A", - "G" -] -[object ArangoQueryCursor, count: 6, cached: false, hasMore: false] diff --git a/Documentation/Examples/GRAPHTRAV_07_traverse_7.generated b/Documentation/Examples/GRAPHTRAV_07_traverse_7.generated deleted file mode 100644 index 6ca606eb6753..000000000000 --- a/Documentation/Examples/GRAPHTRAV_07_traverse_7.generated +++ /dev/null @@ -1,42 +0,0 @@ -@Q: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - LET localScopeVar = RAND() > 0.5 - FILTER p.edges[0].theTruth != localScopeVar - RETURN v._key - -@R -Query String: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - LET localScopeVar = RAND() > 0.5 - FILTER p.edges[0].theTruth != localScopeVar - RETURN v._key - - -Execution plan: - Id NodeType Est. 
Comment - 1 SingletonNode 1 * ROOT - 2 TraversalNode 1 - FOR v /* vertex */, p /* paths */ IN 1..3 /* min..maxPathDepth */ OUTBOUND 'circles/A' /* startnode */ GRAPH 'traversalGraph' - 3 CalculationNode 1 - LET localScopeVar = (RAND() > 0.5) /* simple expression */ - 4 CalculationNode 1 - LET #6 = (p.`edges`[0].`theTruth` != localScopeVar) /* simple expression */ - 5 FilterNode 1 - FILTER #6 - 6 CalculationNode 1 - LET #8 = v.`_key` /* attribute expression */ - 7 ReturnNode 1 - RETURN #8 - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 2 edge edges false false n/a [ `_from`, `_to` ] base OUTBOUND - -Functions used: - Name Deterministic Cacheable Uses V8 - RAND false false false - -Traversals on graphs: - Id Depth Vertex collections Edge collections Options Filter / Prune Conditions - 2 1..3 circles edges uniqueVertices: none, uniqueEdges: path - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 optimize-traversals - 3 move-calculations-down - diff --git a/Documentation/Examples/GRAPHTRAV_07_traverse_8.generated b/Documentation/Examples/GRAPHTRAV_07_traverse_8.generated deleted file mode 100644 index 5897f8f8f3b5..000000000000 --- a/Documentation/Examples/GRAPHTRAV_07_traverse_8.generated +++ /dev/null @@ -1,39 +0,0 @@ -@Q: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].label == 'right_foo' - RETURN v._key - -@R -Query String: - FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].label == 'right_foo' - RETURN v._key - - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 2 TraversalNode 1 - FOR v /* vertex */ IN 1..3 /* min..maxPathDepth */ OUTBOUND 'circles/A' /* startnode */ GRAPH 'traversalGraph' - 5 CalculationNode 1 - LET #7 = v.`_key` /* attribute expression */ - 6 ReturnNode 1 - RETURN #7 - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 2 edge edges false false n/a [ `_from`, `_to` ] base OUTBOUND - 2 edge edges false false n/a [ `_from`, `_to` ] level 0 OUTBOUND - -Traversals on graphs: - Id Depth Vertex collections Edge collections Options Filter / Prune Conditions - 2 1..3 circles edges uniqueVertices: none, uniqueEdges: path FILTER (p.`edges`[0].`label` == "right_foo") - -Optimization rules applied: - Id RuleName - 1 move-calculations-up - 2 move-filters-up - 3 move-calculations-up-2 - 4 move-filters-up-2 - 5 optimize-traversals - 6 remove-filter-covered-by-traversal - 7 remove-unnecessary-calculations-2 - 8 remove-redundant-path-var - diff --git a/Documentation/Examples/GRAPHTRAV_99_drop_graph.generated b/Documentation/Examples/GRAPHTRAV_99_drop_graph.generated deleted file mode 100644 index fd7a21f419cb..000000000000 --- a/Documentation/Examples/GRAPHTRAV_99_drop_graph.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> examples.dropGraph("traversalGraph"); diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterCombine.generated b/Documentation/Examples/GRAPHTRAV_graphFilterCombine.generated deleted file mode 100644 index 9d6a22cece48..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterCombine.generated +++ /dev/null @@ -1,58 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].theTruth == true - AND p.edges[1].theFalse == false - FILTER p.vertices[1]._key == "G" - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "G", - 
"H" - ], - "edges": [ - "right_foo", - "right_blob" - ] - }, - { - "vertices": [ - "A", - "G", - "H", - "I" - ], - "edges": [ - "right_foo", - "right_blob", - "right_blub" - ] - }, - { - "vertices": [ - "A", - "G", - "J" - ], - "edges": [ - "right_foo", - "right_zip" - ] - }, - { - "vertices": [ - "A", - "G", - "J", - "K" - ], - "edges": [ - "right_foo", - "right_zip", - "right_zup" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterEdges.generated b/Documentation/Examples/GRAPHTRAV_graphFilterEdges.generated deleted file mode 100644 index 557ab03f41ce..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterEdges.generated +++ /dev/null @@ -1,122 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[0].theTruth == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "B" - ], - "edges": [ - "left_bar" - ] - }, - { - "vertices": [ - "A", - "B", - "C" - ], - "edges": [ - "left_bar", - "left_blarg" - ] - }, - { - "vertices": [ - "A", - "B", - "C", - "D" - ], - "edges": [ - "left_bar", - "left_blarg", - "left_blorg" - ] - }, - { - "vertices": [ - "A", - "B", - "E" - ], - "edges": [ - "left_bar", - "left_blub" - ] - }, - { - "vertices": [ - "A", - "B", - "E", - "F" - ], - "edges": [ - "left_bar", - "left_blub", - "left_schubi" - ] - }, - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - }, - { - "vertices": [ - "A", - "G", - "H" - ], - "edges": [ - "right_foo", - "right_blob" - ] - }, - { - "vertices": [ - "A", - "G", - "H", - "I" - ], - "edges": [ - "right_foo", - "right_blob", - "right_blub" - ] - }, - { - "vertices": [ - "A", - "G", - "J" - ], - "edges": [ - "right_foo", - "right_zip" - ] - }, - { - "vertices": [ - "A", - "G", - "J", - "K" - ], - "edges": [ - "right_foo", - "right_zip", - "right_zup" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterEntirePath.generated b/Documentation/Examples/GRAPHTRAV_graphFilterEntirePath.generated deleted file mode 100644 index 712319b3e0c8..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterEntirePath.generated +++ /dev/null @@ -1,122 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth ALL == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "B" - ], - "edges": [ - "left_bar" - ] - }, - { - "vertices": [ - "A", - "B", - "C" - ], - "edges": [ - "left_bar", - "left_blarg" - ] - }, - { - "vertices": [ - "A", - "B", - "C", - "D" - ], - "edges": [ - "left_bar", - "left_blarg", - "left_blorg" - ] - }, - { - "vertices": [ - "A", - "B", - "E" - ], - "edges": [ - "left_bar", - "left_blub" - ] - }, - { - "vertices": [ - "A", - "B", - "E", - "F" - ], - "edges": [ - "left_bar", - "left_blub", - "left_schubi" - ] - }, - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - }, - { - "vertices": [ - "A", - "G", - "H" - ], - "edges": [ - "right_foo", - "right_blob" - ] - }, - { - "vertices": [ - "A", - "G", - "H", - "I" - ], - "edges": [ - "right_foo", - "right_blob", - "right_blub" - ] - }, - { - "vertices": [ - "A", - "G", - "J" - ], - "edges": [ - "right_foo", - "right_zip" - ] - }, - { - "vertices": [ - "A", - "G", - "J", - "K" - ], - "edges": [ - "right_foo", - "right_zip", - "right_zup" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterPathAnyEdge.generated 
b/Documentation/Examples/GRAPHTRAV_graphFilterPathAnyEdge.generated deleted file mode 100644 index 953fb9be3057..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterPathAnyEdge.generated +++ /dev/null @@ -1,122 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth ANY == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "B" - ], - "edges": [ - "left_bar" - ] - }, - { - "vertices": [ - "A", - "B", - "C" - ], - "edges": [ - "left_bar", - "left_blarg" - ] - }, - { - "vertices": [ - "A", - "B", - "C", - "D" - ], - "edges": [ - "left_bar", - "left_blarg", - "left_blorg" - ] - }, - { - "vertices": [ - "A", - "B", - "E" - ], - "edges": [ - "left_bar", - "left_blub" - ] - }, - { - "vertices": [ - "A", - "B", - "E", - "F" - ], - "edges": [ - "left_bar", - "left_blub", - "left_schubi" - ] - }, - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - }, - { - "vertices": [ - "A", - "G", - "H" - ], - "edges": [ - "right_foo", - "right_blob" - ] - }, - { - "vertices": [ - "A", - "G", - "H", - "I" - ], - "edges": [ - "right_foo", - "right_blob", - "right_blub" - ] - }, - { - "vertices": [ - "A", - "G", - "J" - ], - "edges": [ - "right_foo", - "right_zip" - ] - }, - { - "vertices": [ - "A", - "G", - "J", - "K" - ], - "edges": [ - "right_foo", - "right_zip", - "right_zup" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterPathEdges.generated b/Documentation/Examples/GRAPHTRAV_graphFilterPathEdges.generated deleted file mode 100644 index 261bff32fe38..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterPathEdges.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.edges[*].theTruth NONE == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphFilterVertices.generated b/Documentation/Examples/GRAPHTRAV_graphFilterVertices.generated deleted file mode 100644 index bf27ab59998a..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphFilterVertices.generated +++ /dev/null @@ -1,65 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - FILTER p.vertices[1]._key == "G" - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - }, - { - "vertices": [ - "A", - "G", - "H" - ], - "edges": [ - "right_foo", - "right_blob" - ] - }, - { - "vertices": [ - "A", - "G", - "H", - "I" - ], - "edges": [ - "right_foo", - "right_blob", - "right_blub" - ] - }, - { - "vertices": [ - "A", - "G", - "J" - ], - "edges": [ - "right_foo", - "right_zip" - ] - }, - { - "vertices": [ - "A", - "G", - "J", - "K" - ], - "edges": [ - "right_foo", - "right_zip", - "right_zup" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphPruneCollection.generated b/Documentation/Examples/GRAPHTRAV_graphPruneCollection.generated deleted file mode 100644 index ab60aa20b55b..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphPruneCollection.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE IS_SAME_COLLECTION('circles', v) - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[] \ No newline at end of file diff --git 
a/Documentation/Examples/GRAPHTRAV_graphPruneEdges.generated b/Documentation/Examples/GRAPHTRAV_graphPruneEdges.generated deleted file mode 100644 index dc7b6eea7bce..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphPruneEdges.generated +++ /dev/null @@ -1,26 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE e.theTruth == true - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "B" - ], - "edges": [ - "left_bar" - ] - }, - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_graphPruneVertices.generated b/Documentation/Examples/GRAPHTRAV_graphPruneVertices.generated deleted file mode 100644 index 0cec84af4e5d..000000000000 --- a/Documentation/Examples/GRAPHTRAV_graphPruneVertices.generated +++ /dev/null @@ -1,18 +0,0 @@ -@Q: - FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' - PRUNE v._key == 'G' - FILTER v._key == 'G' - RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label } - -@R -[ - { - "vertices": [ - "A", - "G" - ], - "edges": [ - "right_foo" - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_multiplePathSearch.generated b/Documentation/Examples/GRAPHTRAV_multiplePathSearch.generated deleted file mode 100644 index 2cac12ca7e6c..000000000000 --- a/Documentation/Examples/GRAPHTRAV_multiplePathSearch.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: - RETURN LENGTH( - FOR v IN OUTBOUND - SHORTEST_PATH "mps_verts/A" TO "mps_verts/C" mps_edges - RETURN v - ) - -@R -[ - 3 -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_multiplePathSearch2.generated b/Documentation/Examples/GRAPHTRAV_multiplePathSearch2.generated deleted file mode 100644 index b2f49c97d073..000000000000 --- a/Documentation/Examples/GRAPHTRAV_multiplePathSearch2.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: - FOR v, e, p IN 2..2 OUTBOUND "mps_verts/A" mps_edges - FILTER v._id == "mps_verts/C" - RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*]._key) - -@R -[ - "A -> B -> C", - "A -> D -> C" -] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_removeVertex1.generated b/Documentation/Examples/GRAPHTRAV_removeVertex1.generated deleted file mode 100644 index a7186933fbdd..000000000000 --- a/Documentation/Examples/GRAPHTRAV_removeVertex1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph' RETURN e._key) -LET r = (FOR key IN edgeKeys REMOVE key IN knows) -REMOVE 'eve' IN persons - -@R -[] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_removeVertex2.generated b/Documentation/Examples/GRAPHTRAV_removeVertex2.generated deleted file mode 100644 index 9e206fbd1359..000000000000 --- a/Documentation/Examples/GRAPHTRAV_removeVertex2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph' - REMOVE e._key IN knows) -REMOVE 'eve' IN persons - -@R -[] \ No newline at end of file diff --git a/Documentation/Examples/GRAPHTRAV_removeVertex3.generated b/Documentation/Examples/GRAPHTRAV_removeVertex3.generated deleted file mode 100644 index dcc7f0abfe2d..000000000000 --- a/Documentation/Examples/GRAPHTRAV_removeVertex3.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -LET edgeKeys = (FOR v, e IN 1..1 ANY 'germanCity/Berlin' GRAPH 'routeplanner' RETURN e._key) -LET r = (FOR key IN edgeKeys REMOVE key IN 
internationalHighway - OPTIONS { ignoreErrors: true } REMOVE key IN germanHighway - OPTIONS { ignoreErrors: true } REMOVE key IN frenchHighway - OPTIONS { ignoreErrors: true }) -REMOVE 'Berlin' IN germanCity - -@R -[] \ No newline at end of file diff --git a/Documentation/Examples/HttpGharialAddEdge.generated b/Documentation/Examples/HttpGharialAddEdge.generated deleted file mode 100644 index 361824b1392a..000000000000 --- a/Documentation/Examples/HttpGharialAddEdge.generated +++ /dev/null @@ -1,22 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF -{ - "type" : "friend", - "_from" : "female/alice", - "_to" : "female/diana" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1Gku--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "edge" : { - "_id" : "relation/100926", - "_key" : "100926", - "_rev" : "_YOn1Gku--_" - } -} diff --git a/Documentation/Examples/HttpGharialAddEdgeCol.generated b/Documentation/Examples/HttpGharialAddEdgeCol.generated deleted file mode 100644 index b2a53ecf161c..000000000000 --- a/Documentation/Examples/HttpGharialAddEdgeCol.generated +++ /dev/null @@ -1,55 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge <<EOF -{ - "collection" : "works_in", - "from" : [ - "female", - "male" - ], - "to" : [ - "city" - ] -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1Goy--B -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "social", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "relation", - "from" : [ - "female", - "male" - ], - "to" : [ - "female", - "male" - ] - }, - { - "collection" : "works_in", - "from" : [ - "female", - "male" - ], - "to" : [ - "city" - ] - } - ], - "orphanCollections" : [ ], - "_rev" : "_YOn1Goy--B", - "_id" : "_graphs/social", - "name" : "social" - } -} diff --git a/Documentation/Examples/HttpGharialAddVertex.generated b/Documentation/Examples/HttpGharialAddVertex.generated deleted file mode 100644 index f85e4d760328..000000000000 --- a/Documentation/Examples/HttpGharialAddVertex.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/male <<EOF -{ - "name" : "Francis" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1GtO--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "vertex" : { - "_id" : "male/101105", - "_key" : "101105", - "_rev" : "_YOn1GtO--_" - } -} diff --git a/Documentation/Examples/HttpGharialAddVertexCol.generated b/Documentation/Examples/HttpGharialAddVertexCol.generated deleted file mode 100644 index 2823e1ca8425..000000000000 --- a/Documentation/Examples/HttpGharialAddVertexCol.generated +++ /dev/null @@ -1,40 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex <<EOF -{ - "collection" : "otherVertices" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1Gwm--B -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "social", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : 
false, - "edgeDefinitions" : [ - { - "collection" : "relation", - "from" : [ - "female", - "male" - ], - "to" : [ - "female", - "male" - ] - } - ], - "orphanCollections" : [ - "otherVertices" - ], - "_rev" : "_YOn1Gwm--B", - "_id" : "_graphs/social", - "name" : "social" - } -} diff --git a/Documentation/Examples/HttpGharialCreate.generated b/Documentation/Examples/HttpGharialCreate.generated deleted file mode 100644 index 2b76b4fcf868..000000000000 --- a/Documentation/Examples/HttpGharialCreate.generated +++ /dev/null @@ -1,47 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF -{ - "name" : "myGraph", - "edgeDefinitions" : [ - { - "collection" : "edges", - "from" : [ - "startVertices" - ], - "to" : [ - "endVertices" - ] - } - ] -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1G0O--B -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "myGraph", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "edges", - "from" : [ - "startVertices" - ], - "to" : [ - "endVertices" - ] - } - ], - "orphanCollections" : [ ], - "_rev" : "_YOn1G0O--B", - "_id" : "_graphs/myGraph", - "name" : "myGraph" - } -} diff --git a/Documentation/Examples/HttpGharialCreate2.generated b/Documentation/Examples/HttpGharialCreate2.generated deleted file mode 100644 index 47711e9c4e9e..000000000000 --- a/Documentation/Examples/HttpGharialCreate2.generated +++ /dev/null @@ -1,56 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF -{ - "name" : "myGraph", - "edgeDefinitions" : [ - { - "collection" : "edges", - "from" : [ - "startVertices" - ], - "to" : [ - "endVertices" - ] - } - ], - "isSmart" : true, - "options" : { - "replicationFactor" : 2, - "numberOfShards" : 9, - "smartGraphAttribute" : "region" - } -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1G3a--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "myGraph", - "numberOfShards" : 9, - "replicationFactor" : 2, - "isSmart" : true, - "edgeDefinitions" : [ - { - "collection" : "edges", - "from" : [ - "startVertices" - ], - "to" : [ - "endVertices" - ] - } - ], - "orphanCollections" : [ ], - "initial" : "startVertices", - "initialCid" : 101271, - "smartGraphAttribute" : "region", - "_rev" : "_YOn1G3a--_", - "_id" : "_graphs/myGraph", - "name" : "myGraph" - } -} diff --git a/Documentation/Examples/HttpGharialDeleteEdge.generated b/Documentation/Examples/HttpGharialDeleteEdge.generated deleted file mode 100644 index f8e43bdabf4b..000000000000 --- a/Documentation/Examples/HttpGharialDeleteEdge.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/edge/relation/101350 - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "removed" : true -} diff --git a/Documentation/Examples/HttpGharialDeleteVertex.generated b/Documentation/Examples/HttpGharialDeleteVertex.generated deleted file mode 100644 index 4947da005e87..000000000000 --- a/Documentation/Examples/HttpGharialDeleteVertex.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - 
http://localhost:8529/_api/gharial/social/vertex/female/alice - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "removed" : true -} diff --git a/Documentation/Examples/HttpGharialDrop.generated b/Documentation/Examples/HttpGharialDrop.generated deleted file mode 100644 index c73a120e9f7e..000000000000 --- a/Documentation/Examples/HttpGharialDrop.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social?dropCollections=true - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "removed" : true -} diff --git a/Documentation/Examples/HttpGharialEdgeDefinitionRemove.generated b/Documentation/Examples/HttpGharialEdgeDefinitionRemove.generated deleted file mode 100644 index 0234d440e77e..000000000000 --- a/Documentation/Examples/HttpGharialEdgeDefinitionRemove.generated +++ /dev/null @@ -1,25 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/edge/relation - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1HG---F -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "social", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ ], - "orphanCollections" : [ - "female", - "male" - ], - "_rev" : "_YOn1HG---F", - "_id" : "_graphs/social", - "name" : "social" - } -} diff --git a/Documentation/Examples/HttpGharialGetEdge.generated b/Documentation/Examples/HttpGharialGetEdge.generated deleted file mode 100644 index f897ece01880..000000000000 --- a/Documentation/Examples/HttpGharialGetEdge.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/edge/relation/101670 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: _YOn1HJG--J -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "edge" : { - "_key" : "101670", - "_id" : "relation/101670", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1HJG--J", - "type" : "friend", - "vertex" : "alice" - } -} diff --git a/Documentation/Examples/HttpGharialGetGraph.generated b/Documentation/Examples/HttpGharialGetGraph.generated deleted file mode 100644 index 6afc0576b271..000000000000 --- a/Documentation/Examples/HttpGharialGetGraph.generated +++ /dev/null @@ -1,31 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/myGraph - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "graph" : { - "_key" : "myGraph", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "edges", - "from" : [ - "startVertices" - ], - "to" : [ - "endVertices" - ] - } - ], - "orphanCollections" : [ ], - "_rev" : "_YOn1HMu--B", - "_id" : "_graphs/myGraph", - "name" : "myGraph" - } -} diff --git a/Documentation/Examples/HttpGharialGetVertex.generated b/Documentation/Examples/HttpGharialGetVertex.generated deleted file mode 100644 index 9df47506a123..000000000000 --- a/Documentation/Examples/HttpGharialGetVertex.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - 
http://localhost:8529/_api/gharial/social/vertex/female/alice - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: _YOn1HP6--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "vertex" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1HP6--_", - "name" : "Alice" - } -} diff --git a/Documentation/Examples/HttpGharialList.generated b/Documentation/Examples/HttpGharialList.generated deleted file mode 100644 index 6a392b428177..000000000000 --- a/Documentation/Examples/HttpGharialList.generated +++ /dev/null @@ -1,74 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "graphs" : [ - { - "_key" : "routeplanner", - "_id" : "_graphs/routeplanner", - "_rev" : "_YOn1HVm--B", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "frenchHighway", - "from" : [ - "frenchCity" - ], - "to" : [ - "frenchCity" - ] - }, - { - "collection" : "germanHighway", - "from" : [ - "germanCity" - ], - "to" : [ - "germanCity" - ] - }, - { - "collection" : "internationalHighway", - "from" : [ - "frenchCity", - "germanCity" - ], - "to" : [ - "frenchCity", - "germanCity" - ] - } - ], - "orphanCollections" : [ ] - }, - { - "_key" : "social", - "_id" : "_graphs/social", - "_rev" : "_YOn1HT---B", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "relation", - "from" : [ - "female", - "male" - ], - "to" : [ - "female", - "male" - ] - } - ], - "orphanCollections" : [ ] - } - ] -} diff --git a/Documentation/Examples/HttpGharialListEdge.generated b/Documentation/Examples/HttpGharialListEdge.generated deleted file mode 100644 index 42ee315aac60..000000000000 --- a/Documentation/Examples/HttpGharialListEdge.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/edge - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "collections" : [ - "relation" - ] -} diff --git a/Documentation/Examples/HttpGharialListVertex.generated b/Documentation/Examples/HttpGharialListVertex.generated deleted file mode 100644 index be791d9a353c..000000000000 --- a/Documentation/Examples/HttpGharialListVertex.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/vertex - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "collections" : [ - "female", - "male" - ] -} diff --git a/Documentation/Examples/HttpGharialModifyVertex.generated b/Documentation/Examples/HttpGharialModifyVertex.generated deleted file mode 100644 index bd27da73e4d2..000000000000 --- a/Documentation/Examples/HttpGharialModifyVertex.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF -{ - "age" : 26 -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1Hjy--J -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "vertex" : { - "_id" : "female/alice", - "_key" : "alice", - "_oldRev" : 
"_YOn1Hju--_", - "_rev" : "_YOn1Hjy--J" - } -} diff --git a/Documentation/Examples/HttpGharialPatchEdge.generated b/Documentation/Examples/HttpGharialPatchEdge.generated deleted file mode 100644 index 5b1119d4e7a9..000000000000 --- a/Documentation/Examples/HttpGharialPatchEdge.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/102283 <<EOF -{ - "since" : "01.01.2001" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1HnK--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "edge" : { - "_id" : "relation/102283", - "_key" : "102283", - "_oldRev" : "_YOn1HnG--D", - "_rev" : "_YOn1HnK--_" - } -} diff --git a/Documentation/Examples/HttpGharialPutEdge.generated b/Documentation/Examples/HttpGharialPutEdge.generated deleted file mode 100644 index bc4b3937de60..000000000000 --- a/Documentation/Examples/HttpGharialPutEdge.generated +++ /dev/null @@ -1,23 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/102360 <<EOF -{ - "type" : "divorced", - "_from" : "female/alice", - "_to" : "male/bob" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1HqS--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "edge" : { - "_id" : "relation/102360", - "_key" : "102360", - "_oldRev" : "_YOn1HqO--D", - "_rev" : "_YOn1HqS--_" - } -} diff --git a/Documentation/Examples/HttpGharialRemoveVertexCollection.generated b/Documentation/Examples/HttpGharialRemoveVertexCollection.generated deleted file mode 100644 index 1ee466ecea8e..000000000000 --- a/Documentation/Examples/HttpGharialRemoveVertexCollection.generated +++ /dev/null @@ -1,34 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/vertex/otherVertices - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1Htu--_ -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "social", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "relation", - "from" : [ - "female", - "male" - ], - "to" : [ - "female", - "male" - ] - } - ], - "orphanCollections" : [ ], - "_rev" : "_YOn1Htu--_", - "_id" : "_graphs/social", - "name" : "social" - } -} diff --git a/Documentation/Examples/HttpGharialRemoveVertexCollectionFailed.generated b/Documentation/Examples/HttpGharialRemoveVertexCollectionFailed.generated deleted file mode 100644 index 077920aaa05e..000000000000 --- a/Documentation/Examples/HttpGharialRemoveVertexCollectionFailed.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/gharial/social/vertex/male - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "not in orphan collection", - "code" : 400, - "errorNum" : 1928 -} diff --git a/Documentation/Examples/HttpGharialReplaceEdgeCol.generated b/Documentation/Examples/HttpGharialReplaceEdgeCol.generated deleted file mode 100644 index e28b25eabd2e..000000000000 --- a/Documentation/Examples/HttpGharialReplaceEdgeCol.generated +++ /dev/null @@ -1,50 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' 
--data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF -{ - "collection" : "relation", - "from" : [ - "female", - "male", - "animal" - ], - "to" : [ - "female", - "male", - "animal" - ] -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1H1K--B -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "graph" : { - "_key" : "social", - "numberOfShards" : 1, - "replicationFactor" : 1, - "isSmart" : false, - "edgeDefinitions" : [ - { - "collection" : "relation", - "from" : [ - "animal", - "female", - "male" - ], - "to" : [ - "animal", - "female", - "male" - ] - } - ], - "orphanCollections" : [ ], - "_rev" : "_YOn1H1K--B", - "_id" : "_graphs/social", - "name" : "social" - } -} diff --git a/Documentation/Examples/HttpGharialReplaceVertex.generated b/Documentation/Examples/HttpGharialReplaceVertex.generated deleted file mode 100644 index 2124dc2054c3..000000000000 --- a/Documentation/Examples/HttpGharialReplaceVertex.generated +++ /dev/null @@ -1,22 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF -{ - "name" : "Alice Cooper", - "age" : 26 -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: _YOn1H5C--D -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202, - "vertex" : { - "_id" : "female/alice", - "_key" : "alice", - "_oldRev" : "_YOn1H5---_", - "_rev" : "_YOn1H5C--D" - } -} diff --git a/Documentation/Examples/IndexHandle.generated b/Documentation/Examples/IndexHandle.generated deleted file mode 100644 index 5b0ea8670750..000000000000 --- a/Documentation/Examples/IndexHandle.generated +++ /dev/null @@ -1,44 +0,0 @@ -arangosh> db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/102732", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> var indexInfo = db.example.getIndexes().map(function(x) { return x.id; }); -arangosh> indexInfo; -[ - "example/0", - "example/102732" -] -arangosh> db._index(indexInfo[0]) -{ - "fields" : [ - "_key" - ], - "id" : "example/0", - "sparse" : false, - "type" : "primary", - "unique" : true, - "code" : 200 -} -arangosh> db._index(indexInfo[1]) -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/102732", - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 200 -} diff --git a/Documentation/Examples/IndexVerify.generated b/Documentation/Examples/IndexVerify.generated deleted file mode 100644 index 8a4dcb8d761a..000000000000 --- a/Documentation/Examples/IndexVerify.generated +++ /dev/null @@ -1,36 +0,0 @@ -arangosh> var explain = require("@arangodb/aql/explainer").explain; -arangosh> db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/102749", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> explain("FOR doc IN example FILTER doc.a < 23 RETURN doc", {colors:false}); -Query String: - FOR doc IN example FILTER doc.a < 23 RETURN doc - -Execution plan: - Id NodeType Est. 
Comment - 1 SingletonNode 1 * ROOT - 6 IndexNode 1 - FOR doc IN example /* skiplist index scan */ - 5 ReturnNode 1 - RETURN doc - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 6 skiplist example false false n/a [ `a`, `b` ] (doc.`a` < 23) - -Optimization rules applied: - Id RuleName - 1 use-indexes - 2 remove-filter-covered-by-index - 3 remove-unnecessary-calculations-2 - - diff --git a/Documentation/Examples/QUERY_01_properyOfQueries.generated b/Documentation/Examples/QUERY_01_properyOfQueries.generated deleted file mode 100644 index 45b97c4ddcff..000000000000 --- a/Documentation/Examples/QUERY_01_properyOfQueries.generated +++ /dev/null @@ -1,34 +0,0 @@ -arangosh> var queries = require("@arangodb/aql/queries"); -arangosh> queries.properties(); -{ - "code" : 200, - "enabled" : true, - "trackSlowQueries" : true, - "trackBindVars" : true, - "maxSlowQueries" : 64, - "slowQueryThreshold" : 10, - "slowStreamingQueryThreshold" : 10, - "maxQueryStringLength" : 4096 -} -arangosh> queries.properties({slowQueryThreshold: 1}); -{ - "code" : 200, - "enabled" : true, - "trackSlowQueries" : true, - "trackBindVars" : true, - "maxSlowQueries" : 64, - "slowQueryThreshold" : 1, - "slowStreamingQueryThreshold" : 10, - "maxQueryStringLength" : 4096 -} -arangosh> queries.properties({slowStreamingQueryThreshold: 1}); -{ - "code" : 200, - "enabled" : true, - "trackSlowQueries" : true, - "trackBindVars" : true, - "maxSlowQueries" : 64, - "slowQueryThreshold" : 1, - "slowStreamingQueryThreshold" : 1, - "maxQueryStringLength" : 4096 -} diff --git a/Documentation/Examples/QUERY_02_listQueries.generated b/Documentation/Examples/QUERY_02_listQueries.generated deleted file mode 100644 index 535e0c24af7d..000000000000 --- a/Documentation/Examples/QUERY_02_listQueries.generated +++ /dev/null @@ -1,29 +0,0 @@ -arangosh> var theQuery = 'FOR sleepLoooong IN 1..5 LET sleepLoooonger = SLEEP(1000) RETURN sleepLoooong'; -arangosh> var tasks = require("@arangodb/tasks"); -arangosh> tasks.register({ -........> id: "mytask-1", -........> name: "this is a sample task to spawn a slow aql query", -........> command: "require('@arangodb').db._query('" + theQuery + "');" -........> }); -{ - "id" : "mytask-1", - "name" : "this is a sample task to spawn a slow aql query", - "created" : 1550658775.0549188, - "type" : "timed", - "offset" : 0, - "command" : "(function (params) { require('@arangodb').db._query('FOR sleepLoooong IN 1..5 LET sleepLoooonger = SLEEP(1000) RETURN sleepLoooong'); } )(params);", - "database" : "_system" -} -arangosh> queries.current(); -[ - { - "id" : "346", - "query" : "FOR sleepLoooong IN 1..5 LET sleepLoooonger = SLEEP(1000) RETURN sleepLoooong", - "bindVars" : { - }, - "started" : "2019-02-20T10:32:55Z", - "runTime" : 2.0032975673675537, - "state" : "executing", - "stream" : false - } -] diff --git a/Documentation/Examples/QUERY_03_listSlowQueries.generated b/Documentation/Examples/QUERY_03_listSlowQueries.generated deleted file mode 100644 index e9f85bbf7535..000000000000 --- a/Documentation/Examples/QUERY_03_listSlowQueries.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> queries.slow(); -[ ] diff --git a/Documentation/Examples/QUERY_04_clearSlowQueries.generated b/Documentation/Examples/QUERY_04_clearSlowQueries.generated deleted file mode 100644 index 2afb7986f45d..000000000000 --- a/Documentation/Examples/QUERY_04_clearSlowQueries.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> queries.clearSlow(); -{ - "code" : 200 -} -arangosh> queries.slow(); -[ ] diff --git 
a/Documentation/Examples/QUERY_05_killQueries.generated b/Documentation/Examples/QUERY_05_killQueries.generated deleted file mode 100644 index 9b83180c7fdf..000000000000 --- a/Documentation/Examples/QUERY_05_killQueries.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var runningQueries = queries.current().filter(function(query) { -........> return query.query === theQuery; -........> }); -arangosh> queries.kill(runningQueries[0].id); -{ - "code" : 200 -} diff --git a/Documentation/Examples/RestAdminStatistics1.generated b/Documentation/Examples/RestAdminStatistics1.generated deleted file mode 100644 index c16d8be77ab3..000000000000 --- a/Documentation/Examples/RestAdminStatistics1.generated +++ /dev/null @@ -1,138 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_admin/statistics - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "time" : 1550658777.0711348, - "enabled" : true, - "system" : { - "minorPageFaults" : 82857, - "majorPageFaults" : 3, - "userTime" : 3.56, - "systemTime" : 1.71, - "numberOfThreads" : 50, - "residentSize" : 361426944, - "residentSizePercent" : 0.021646172810734898, - "virtualSize" : 1353252864 - }, - "client" : { - "httpConnections" : 1, - "connectionTime" : { - "sum" : 0, - "count" : 0, - "counts" : [ - 0, - 0, - 0, - 0 - ] - }, - "totalTime" : { - "sum" : 8.05157732963562, - "count" : 34249, - "counts" : [ - 34044, - 202, - 2, - 0, - 0, - 1, - 0 - ] - }, - "requestTime" : { - "sum" : 4.962236642837524, - "count" : 34249, - "counts" : [ - 34120, - 126, - 2, - 0, - 0, - 1, - 0 - ] - }, - "queueTime" : { - "sum" : 0, - "count" : 0, - "counts" : [ - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - "ioTime" : { - "sum" : 3.0893406867980957, - "count" : 34249, - "counts" : [ - 34173, - 76, - 0, - 0, - 0, - 0, - 0 - ] - }, - "bytesSent" : { - "sum" : 10108900, - "count" : 34249, - "counts" : [ - 240, - 33648, - 332, - 28, - 1, - 0 - ] - }, - "bytesReceived" : { - "sum" : 8043129, - "count" : 34249, - "counts" : [ - 33568, - 681, - 0, - 0, - 0, - 0 - ] - } - }, - "http" : { - "requestsTotal" : 34249, - "requestsAsync" : 0, - "requestsGet" : 795, - "requestsHead" : 0, - "requestsPost" : 33323, - "requestsPut" : 31, - "requestsPatch" : 2, - "requestsDelete" : 98, - "requestsOptions" : 0, - "requestsOther" : 0 - }, - "server" : { - "uptime" : 14.283366203308105, - "physicalMemory" : 16697036800, - "v8Context" : { - "available" : 2, - "busy" : 1, - "dirty" : 0, - "free" : 1, - "max" : 16 - }, - "threads" : { - "scheduler-threads" : 2, - "queued" : 2 - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestAdminStatisticsDescription1.generated b/Documentation/Examples/RestAdminStatisticsDescription1.generated deleted file mode 100644 index 4cb8941f8bf2..000000000000 --- a/Documentation/Examples/RestAdminStatisticsDescription1.generated +++ /dev/null @@ -1,293 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_admin/statistics-description - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "groups" : [ - { - "group" : "system", - "name" : "Process Statistics", - "description" : "Statistics about the ArangoDB process" - }, - { - "group" : "client", - "name" : "Client Connection Statistics", - "description" : "Statistics about the connections." - }, - { - "group" : "http", - "name" : "HTTP Request Statistics", - "description" : "Statistics about the HTTP requests." 
- }, - { - "group" : "server", - "name" : "Server Statistics", - "description" : "Statistics about the ArangoDB server" - } - ], - "figures" : [ - { - "group" : "system", - "identifier" : "userTime", - "name" : "User Time", - "description" : "Amount of time that this process has been scheduled in user mode, measured in seconds.", - "type" : "accumulated", - "units" : "seconds" - }, - { - "group" : "system", - "identifier" : "systemTime", - "name" : "System Time", - "description" : "Amount of time that this process has been scheduled in kernel mode, measured in seconds.", - "type" : "accumulated", - "units" : "seconds" - }, - { - "group" : "system", - "identifier" : "numberOfThreads", - "name" : "Number of Threads", - "description" : "Number of threads in the arangod process.", - "type" : "current", - "units" : "number" - }, - { - "group" : "system", - "identifier" : "residentSize", - "name" : "Resident Set Size", - "description" : "The total size of the number of pages the process has in real memory. This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out. The resident set size is reported in bytes.", - "type" : "current", - "units" : "bytes" - }, - { - "group" : "system", - "identifier" : "residentSizePercent", - "name" : "Resident Set Size", - "description" : "The percentage of physical memory used by the process as resident set size.", - "type" : "current", - "units" : "percent" - }, - { - "group" : "system", - "identifier" : "virtualSize", - "name" : "Virtual Memory Size", - "description" : "On Windows, this figure contains the total amount of memory that the memory manager has committed for the arangod process. On other systems, this figure contains The size of the virtual memory the process is using.", - "type" : "current", - "units" : "bytes" - }, - { - "group" : "system", - "identifier" : "minorPageFaults", - "name" : "Minor Page Faults", - "description" : "The number of minor faults the process has made which have not required loading a memory page from disk. This figure is not reported on Windows.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "system", - "identifier" : "majorPageFaults", - "name" : "Major Page Faults", - "description" : "On Windows, this figure contains the total number of page faults. 
On other system, this figure contains the number of major faults the process has made which have required loading a memory page from disk.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "client", - "identifier" : "httpConnections", - "name" : "Client Connections", - "description" : "The number of connections that are currently open.", - "type" : "current", - "units" : "number" - }, - { - "group" : "client", - "identifier" : "totalTime", - "name" : "Total Time", - "description" : "Total time needed to answer a request.", - "type" : "distribution", - "cuts" : [ - 0.01, - 0.05, - 0.1, - 0.2, - 0.5, - 1 - ], - "units" : "seconds" - }, - { - "group" : "client", - "identifier" : "requestTime", - "name" : "Request Time", - "description" : "Request time needed to answer a request.", - "type" : "distribution", - "cuts" : [ - 0.01, - 0.05, - 0.1, - 0.2, - 0.5, - 1 - ], - "units" : "seconds" - }, - { - "group" : "client", - "identifier" : "queueTime", - "name" : "Queue Time", - "description" : "Queue time needed to answer a request.", - "type" : "distribution", - "cuts" : [ - 0.01, - 0.05, - 0.1, - 0.2, - 0.5, - 1 - ], - "units" : "seconds" - }, - { - "group" : "client", - "identifier" : "bytesSent", - "name" : "Bytes Sent", - "description" : "Bytes sents for a request.", - "type" : "distribution", - "cuts" : [ - 250, - 1000, - 2000, - 5000, - 10000 - ], - "units" : "bytes" - }, - { - "group" : "client", - "identifier" : "bytesReceived", - "name" : "Bytes Received", - "description" : "Bytes received for a request.", - "type" : "distribution", - "cuts" : [ - 250, - 1000, - 2000, - 5000, - 10000 - ], - "units" : "bytes" - }, - { - "group" : "client", - "identifier" : "connectionTime", - "name" : "Connection Time", - "description" : "Total connection time of a client.", - "type" : "distribution", - "cuts" : [ - 0.1, - 1, - 60 - ], - "units" : "seconds" - }, - { - "group" : "http", - "identifier" : "requestsTotal", - "name" : "Total requests", - "description" : "Total number of HTTP requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsAsync", - "name" : "Async requests", - "description" : "Number of asynchronously executed HTTP requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsGet", - "name" : "HTTP GET requests", - "description" : "Number of HTTP GET requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsHead", - "name" : "HTTP HEAD requests", - "description" : "Number of HTTP HEAD requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsPost", - "name" : "HTTP POST requests", - "description" : "Number of HTTP POST requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsPut", - "name" : "HTTP PUT requests", - "description" : "Number of HTTP PUT requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsPatch", - "name" : "HTTP PATCH requests", - "description" : "Number of HTTP PATCH requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsDelete", - "name" : "HTTP DELETE requests", - "description" : "Number of HTTP DELETE requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsOptions", - "name" : "HTTP OPTIONS 
requests", - "description" : "Number of HTTP OPTIONS requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "http", - "identifier" : "requestsOther", - "name" : "other HTTP requests", - "description" : "Number of other HTTP requests.", - "type" : "accumulated", - "units" : "number" - }, - { - "group" : "server", - "identifier" : "uptime", - "name" : "Server Uptime", - "description" : "Number of seconds elapsed since server start.", - "type" : "current", - "units" : "seconds" - }, - { - "group" : "server", - "identifier" : "physicalMemory", - "name" : "Physical Memory", - "description" : "Physical memory in bytes.", - "type" : "current", - "units" : "bytes" - } - ], - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestAnalyzerDelete.generated b/Documentation/Examples/RestAnalyzerDelete.generated deleted file mode 100644 index 01627cca1241..000000000000 --- a/Documentation/Examples/RestAnalyzerDelete.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/analyzer/_system%3A%3AtestAnalyzer - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "name" : "_system::testAnalyzer" -} diff --git a/Documentation/Examples/RestAnalyzerDeleteForce.generated b/Documentation/Examples/RestAnalyzerDeleteForce.generated deleted file mode 100644 index 6407e4e9d7fb..000000000000 --- a/Documentation/Examples/RestAnalyzerDeleteForce.generated +++ /dev/null @@ -1,33 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF -{ - "name" : "testCollection" -} -EOF - -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view <<EOF -{ - "name" : "testView", - "type" : "arangosearch", - "links" : { - "testCollection" : { - "analyzers" : [ - "_system::testAnalyzer" - ] - } - } -} -EOF - -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/analyzer/_system%3A%3AtestAnalyzer?force=false - -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/analyzer/_system%3A%3AtestAnalyzer?force=true - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "name" : "_system::testAnalyzer" -} diff --git a/Documentation/Examples/RestAnalyzerGet.generated b/Documentation/Examples/RestAnalyzerGet.generated deleted file mode 100644 index 9dae78c957dc..000000000000 --- a/Documentation/Examples/RestAnalyzerGet.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/analyzer/_system%3A%3AtestAnalyzer - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : "identity", - "properties" : "test properties", - "features" : [ ], - "name" : "_system::testAnalyzer" -} diff --git a/Documentation/Examples/RestAnalyzerPost.generated b/Documentation/Examples/RestAnalyzerPost.generated deleted file mode 100644 index 899471524d12..000000000000 --- a/Documentation/Examples/RestAnalyzerPost.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/analyzer <<EOF -{ - "name" : "_system::testAnalyzer", - "type" : 
"identity" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "name" : "_system::testAnalyzer", - "type" : "identity", - "properties" : null, - "features" : [ ] -} diff --git a/Documentation/Examples/RestAnalyzersGet.generated b/Documentation/Examples/RestAnalyzersGet.generated deleted file mode 100644 index f72fee8cb5a7..000000000000 --- a/Documentation/Examples/RestAnalyzersGet.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/analyzer - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ ] -} diff --git a/Documentation/Examples/RestAqlfunctionCreate.generated b/Documentation/Examples/RestAqlfunctionCreate.generated deleted file mode 100644 index 26c4d86fd730..000000000000 --- a/Documentation/Examples/RestAqlfunctionCreate.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/aqlfunction <<EOF -{ - "name" : "myfunctions::temperature::celsiustofahrenheit", - "code" : "function (celsius) { return celsius * 1.8 + 32; }", - "isDeterministic" : true -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 201, - "isNewlyCreated" : true -} diff --git a/Documentation/Examples/RestAqlfunctionDelete.generated b/Documentation/Examples/RestAqlfunctionDelete.generated deleted file mode 100644 index 562331e7ecb8..000000000000 --- a/Documentation/Examples/RestAqlfunctionDelete.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/aqlfunction/square::x::y - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "deletedCount" : 1 -} diff --git a/Documentation/Examples/RestAqlfunctionDeleteFails.generated b/Documentation/Examples/RestAqlfunctionDeleteFails.generated deleted file mode 100644 index da26db44e16a..000000000000 --- a/Documentation/Examples/RestAqlfunctionDeleteFails.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/aqlfunction/myfunction::x::y - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "no AQL user function with name 'myfunction::x::y' found", - "code" : 404, - "errorNum" : 1582 -} diff --git a/Documentation/Examples/RestAqlfunctionsGetAll.generated b/Documentation/Examples/RestAqlfunctionsGetAll.generated deleted file mode 100644 index 8229946b9708..000000000000 --- a/Documentation/Examples/RestAqlfunctionsGetAll.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/aqlfunction/test - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ ] -} diff --git a/Documentation/Examples/RestBatchImplicitBoundary.generated b/Documentation/Examples/RestBatchImplicitBoundary.generated deleted file mode 100644 index 5e3e985fb7c0..000000000000 --- a/Documentation/Examples/RestBatchImplicitBoundary.generated +++ /dev/null @@ -1,52 +0,0 @@ -shell> curl -X POST --header 'accept: 
application/json' --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart - -DELETE /_api/collection/notexisting1 HTTP/1.1 - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart - -DELETE _api/collection/notexisting2 HTTP/1.1 ---SomeBoundaryValue-- - -EOF - -HTTP/1.1 OK -content-type: application/json -x-arango-errors: 2 -x-content-type-options: nosniff - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart - -HTTP/1.1 404 Not Found -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 87 - -{ - "code" : 404, - "error" : true, - "errorMessage" : "collection or view not found", - "errorNum" : 1203 -}↩ - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart - -HTTP/1.1 404 Not Found -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 101 - -{ - "error" : true, - "code" : 404, - "errorNum" : 404, - "errorMessage" : "unknown path '_api/collection/notexisting2'" -}↩ - ---SomeBoundaryValue-- diff --git a/Documentation/Examples/RestBatchMultipartHeader.generated b/Documentation/Examples/RestBatchMultipartHeader.generated deleted file mode 100644 index bff448498cd9..000000000000 --- a/Documentation/Examples/RestBatchMultipartHeader.generated +++ /dev/null @@ -1,200 +0,0 @@ -shell> curl -X POST --header 'Content-Type: multipart/form-data; boundary=SomeBoundaryValue' --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: myId1 - -GET /_api/version HTTP/1.1 - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: myId2 - -DELETE /_api/collection/products HTTP/1.1 - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: someId - -POST /_api/collection/products HTTP/1.1 - -{"name": "products" } - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: nextId - -GET /_api/collection/products/figures HTTP/1.1 - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: otherId - -DELETE /_api/collection/products HTTP/1.1 ---SomeBoundaryValue-- - -EOF - -HTTP/1.1 OK -content-type: application/json -x-arango-errors: 1 -x-content-type-options: nosniff - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: myId1 - -HTTP/1.1 200 OK -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 65 - -{ - "server" : "arango", - "license" : "community", - "version" : "3.6.0-devel" -}↩ - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: myId2 - -HTTP/1.1 404 Not Found -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 87 - -{ - "code" : 404, - "error" : true, - "errorMessage" : "collection or view not found", - "errorNum" : 1203 -}↩ - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: someId - -HTTP/1.1 200 OK -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 322 - -{ - "error" : false, - "code" : 200, - "waitForSync" : false, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h5E72DF2A848A/111", - "statusString" : "loaded", - "id" : "111", - "name" : "products", - "doCompact" : true, - "isSystem" : false, - 
"indexBuckets" : 8, - "isVolatile" : false -}↩ - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: nextId - -HTTP/1.1 200 OK -Server: -Location: /_api/collection/products/figures -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 829 - -{ - "error" : false, - "code" : 200, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "isVolatile" : false, - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "count" : 0, - "waitForSync" : false, - "figures" : { - "indexes" : { - "count" : 1, - "size" : 32128 - }, - "documentReferences" : 0, - "waitingFor" : "-", - "alive" : { - "count" : 0, - "size" : 0 - }, - "dead" : { - "count" : 0, - "size" : 0, - "deletion" : 0 - }, - "compactionStatus" : { - "message" : "compaction not yet started", - "time" : "2019-07-17T12:01:44Z", - "count" : 0, - "filesCombined" : 0, - "bytesRead" : 0, - "bytesWritten" : 0 - }, - "datafiles" : { - "count" : 0, - "fileSize" : 0 - }, - "journals" : { - "count" : 0, - "fileSize" : 0 - }, - "compactors" : { - "count" : 0, - "fileSize" : 0 - }, - "revisions" : { - "count" : 0, - "size" : 48192 - }, - "lastTick" : 0, - "uncollectedLogfileEntries" : 0 - }, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h5E72DF2A848A/111", - "statusString" : "loaded", - "id" : "111", - "indexBuckets" : 8 -}↩ - ---SomeBoundaryValue -Content-Type: application/x-arango-batchpart -Content-Id: otherId - -HTTP/1.1 200 OK -Server: -Connection: -Content-Type: application/json; charset=utf-8 -Content-Length: 37 - -{ - "error" : false, - "code" : 200, - "id" : "111" -}↩ - ---SomeBoundaryValue-- diff --git a/Documentation/Examples/RestCollectionCreateCollection.generated b/Documentation/Examples/RestCollectionCreateCollection.generated deleted file mode 100644 index 3cf648a48be5..000000000000 --- a/Documentation/Examples/RestCollectionCreateCollection.generated +++ /dev/null @@ -1,63 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF -{ - "name" : "testCollectionBasics" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : false, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/102801", - "statusString" : "loaded", - "id" : "102801", - "name" : "testCollectionBasics", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF -{ - "name" : "testCollectionEdges", - "type" : 3 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : false, - "type" : 3, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/102807", - "statusString" : "loaded", - "id" : "102807", - "name" : "testCollectionEdges", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} diff --git a/Documentation/Examples/RestCollectionCreateKeyopt.generated 
b/Documentation/Examples/RestCollectionCreateKeyopt.generated deleted file mode 100644 index 49705d5b9111..000000000000 --- a/Documentation/Examples/RestCollectionCreateKeyopt.generated +++ /dev/null @@ -1,38 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF -{ - "name" : "testCollectionUsers", - "keyOptions" : { - "type" : "autoincrement", - "increment" : 5, - "allowUserKeys" : true - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : false, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "autoincrement", - "offset" : 0, - "increment" : 5, - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/102822", - "statusString" : "loaded", - "id" : "102822", - "name" : "testCollectionUsers", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} diff --git a/Documentation/Examples/RestCollectionDeleteCollectionIdentifier.generated b/Documentation/Examples/RestCollectionDeleteCollectionIdentifier.generated deleted file mode 100644 index fbb05615e5c9..000000000000 --- a/Documentation/Examples/RestCollectionDeleteCollectionIdentifier.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/102833 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "id" : "102833" -} diff --git a/Documentation/Examples/RestCollectionDeleteCollectionName.generated b/Documentation/Examples/RestCollectionDeleteCollectionName.generated deleted file mode 100644 index 0d26a48d2664..000000000000 --- a/Documentation/Examples/RestCollectionDeleteCollectionName.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products1 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "id" : "102844" -} diff --git a/Documentation/Examples/RestCollectionDeleteCollectionSystem.generated b/Documentation/Examples/RestCollectionDeleteCollectionSystem.generated deleted file mode 100644 index a0257bdfd9c1..000000000000 --- a/Documentation/Examples/RestCollectionDeleteCollectionSystem.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/_example?isSystem=true - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "id" : "102855" -} diff --git a/Documentation/Examples/RestCollectionGetAllCollections.generated b/Documentation/Examples/RestCollectionGetAllCollections.generated deleted file mode 100644 index b07bcb45088c..000000000000 --- a/Documentation/Examples/RestCollectionGetAllCollections.generated +++ /dev/null @@ -1,124 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ - { - "id" : "17", - "name" : "_queues", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_queues" - }, - { - "id" : "15", - 
"name" : "_frontend", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_frontend" - }, - { - "id" : "32", - "name" : "_appbundles", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_appbundles" - }, - { - "id" : "66", - "name" : "_statistics", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_statistics" - }, - { - "id" : "8", - "name" : "_users", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_users" - }, - { - "id" : "2", - "name" : "_iresearch_analyzers", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_iresearch_analyzers" - }, - { - "id" : "19", - "name" : "_jobs", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_jobs" - }, - { - "id" : "87", - "name" : "demo", - "status" : 3, - "type" : 2, - "isSystem" : false, - "globallyUniqueId" : "h8B2B671BCFD0/87" - }, - { - "id" : "13", - "name" : "_aqlfunctions", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_aqlfunctions" - }, - { - "id" : "6", - "name" : "_graphs", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_graphs" - }, - { - "id" : "27", - "name" : "_apps", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_apps" - }, - { - "id" : "61", - "name" : "_statisticsRaw", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_statisticsRaw" - }, - { - "id" : "71", - "name" : "_statistics15", - "status" : 3, - "type" : 2, - "isSystem" : true, - "globallyUniqueId" : "_statistics15" - }, - { - "id" : "96", - "name" : "animals", - "status" : 3, - "type" : 2, - "isSystem" : false, - "globallyUniqueId" : "h8B2B671BCFD0/96" - } - ] -} diff --git a/Documentation/Examples/RestCollectionGetCollectionChecksum.generated b/Documentation/Examples/RestCollectionGetCollectionChecksum.generated deleted file mode 100644 index 9dfe715743e3..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionChecksum.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/checksum - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/checksum -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "globallyUniqueId" : "h8B2B671BCFD0/102861", - "isSystem" : false, - "id" : "102861", - "name" : "products", - "revision" : "_YOn1KHS--_", - "checksum" : "2089246606277080887", - "status" : 3, - "type" : 2 -} diff --git a/Documentation/Examples/RestCollectionGetCollectionChecksumNoRev.generated b/Documentation/Examples/RestCollectionGetCollectionChecksumNoRev.generated deleted file mode 100644 index 4660fc693c2e..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionChecksumNoRev.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/checksum?withRevisions=false&withData=true - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/checksum -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "globallyUniqueId" : "h8B2B671BCFD0/102877", - "isSystem" : false, - "id" : "102877", - "name" : "products", - "revision" : "_YOn1KJC--B", - "checksum" : "6947804677053586772", - "status" : 3, - "type" : 2 -} diff --git a/Documentation/Examples/RestCollectionGetCollectionCount.generated 
b/Documentation/Examples/RestCollectionGetCollectionCount.generated deleted file mode 100644 index 89ed098ad5b4..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionCount.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/count - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/count -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : true, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "isVolatile" : false, - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "count" : 100, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 103198 - }, - "globallyUniqueId" : "h8B2B671BCFD0/102893", - "statusString" : "loaded", - "id" : "102893", - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/RestCollectionGetCollectionFigures_mmfiles.generated b/Documentation/Examples/RestCollectionGetCollectionFigures_mmfiles.generated deleted file mode 100644 index 130be6450106..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionFigures_mmfiles.generated +++ /dev/null @@ -1,72 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/figures - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/figures -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "isVolatile" : false, - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "count" : 1, - "waitForSync" : false, - "figures" : { - "indexes" : { - "count" : 1, - "size" : 32128 - }, - "documentReferences" : 0, - "waitingFor" : "-", - "alive" : { - "count" : 1, - "size" : 88 - }, - "dead" : { - "count" : 0, - "size" : 0, - "deletion" : 0 - }, - "compactionStatus" : { - "message" : "skipped compaction because collection has no datafiles", - "time" : "2019-02-20T10:32:57Z", - "count" : 0, - "filesCombined" : 0, - "bytesRead" : 0, - "bytesWritten" : 0 - }, - "datafiles" : { - "count" : 0, - "fileSize" : 0 - }, - "journals" : { - "count" : 1, - "fileSize" : 33554432 - }, - "compactors" : { - "count" : 0, - "fileSize" : 0 - }, - "revisions" : { - "count" : 1, - "size" : 48192 - }, - "lastTick" : 103215, - "uncollectedLogfileEntries" : 0 - }, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 103213 - }, - "globallyUniqueId" : "h8B2B671BCFD0/103206", - "statusString" : "loaded", - "id" : "103206", - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/RestCollectionGetCollectionFigures_rocksdb.generated b/Documentation/Examples/RestCollectionGetCollectionFigures_rocksdb.generated deleted file mode 100644 index 16a0b49b6117..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionFigures_rocksdb.generated +++ /dev/null @@ -1,37 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/figures - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/figures -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : 2, - "status" : 3, - "name" : "products", - "waitForSync" : false, - "objectId" : "101", - "cacheEnabled" : false, - "figures" : { - "indexes" : { - "count" : 1, - "size" : 37 - }, - "documentsSize" : 846, - 
"cacheInUse" : false, - "cacheSize" : 0, - "cacheUsage" : 0 - }, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 107 - }, - "globallyUniqueId" : "h5FFC6746B13/102", - "statusString" : "loaded", - "id" : "102", - "count" : 1, - "isSystem" : false -} diff --git a/Documentation/Examples/RestCollectionGetCollectionIdentifier.generated b/Documentation/Examples/RestCollectionGetCollectionIdentifier.generated deleted file mode 100644 index 361b9c811e8e..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionIdentifier.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/103234/properties - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/103234/properties -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : true, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/103234", - "statusString" : "loaded", - "id" : "103234", - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} diff --git a/Documentation/Examples/RestCollectionGetCollectionName.generated b/Documentation/Examples/RestCollectionGetCollectionName.generated deleted file mode 100644 index 959fe80bab7a..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionName.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/properties - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/properties -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : true, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/103246", - "statusString" : "loaded", - "id" : "103246", - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} diff --git a/Documentation/Examples/RestCollectionGetCollectionRevision.generated b/Documentation/Examples/RestCollectionGetCollectionRevision.generated deleted file mode 100644 index 1f81baa02597..000000000000 --- a/Documentation/Examples/RestCollectionGetCollectionRevision.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/revision - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/revision -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : false, - "journalSize" : 33554432, - "isVolatile" : false, - "isSystem" : false, - "indexBuckets" : 8, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/103258", - "statusString" : "loaded", - "id" : "103258", - "revision" : "0", - "status" : 3, - "type" : 2, - "name" : "products", - "doCompact" : true -} diff --git a/Documentation/Examples/RestCollectionIdentifierLoad.generated b/Documentation/Examples/RestCollectionIdentifierLoad.generated deleted file mode 100644 index ca00293633e4..000000000000 --- 
a/Documentation/Examples/RestCollectionIdentifierLoad.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/load - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/load -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : 2, - "globallyUniqueId" : "h8B2B671BCFD0/103270", - "id" : "103270", - "count" : 0, - "isSystem" : false, - "name" : "products", - "status" : 3 -} diff --git a/Documentation/Examples/RestCollectionIdentifierLoadIndexesIntoMemory.generated b/Documentation/Examples/RestCollectionIdentifierLoadIndexesIntoMemory.generated deleted file mode 100644 index 0c674171242b..000000000000 --- a/Documentation/Examples/RestCollectionIdentifierLoadIndexesIntoMemory.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/loadIndexesIntoMemory - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/loadIndexesIntoMemory -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : true -} diff --git a/Documentation/Examples/RestCollectionIdentifierPropertiesSync.generated b/Documentation/Examples/RestCollectionIdentifierPropertiesSync.generated deleted file mode 100644 index 64604a102179..000000000000 --- a/Documentation/Examples/RestCollectionIdentifierPropertiesSync.generated +++ /dev/null @@ -1,32 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection/products/properties <<EOF -{ - "waitForSync" : true -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/properties -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "waitForSync" : true, - "type" : 2, - "status" : 3, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "globallyUniqueId" : "h8B2B671BCFD0/103295", - "statusString" : "loaded", - "id" : "103295", - "name" : "products", - "doCompact" : true, - "isSystem" : false, - "indexBuckets" : 8, - "isVolatile" : false -} diff --git a/Documentation/Examples/RestCollectionIdentifierRename.generated b/Documentation/Examples/RestCollectionIdentifierRename.generated deleted file mode 100644 index b5f41a09aacd..000000000000 --- a/Documentation/Examples/RestCollectionIdentifierRename.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection/products1/rename <<EOF -{ - "name" : "newname" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products1/rename -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "status" : 3, - "name" : "newname", - "type" : 2, - "isSystem" : false, - "globallyUniqueId" : "h8B2B671BCFD0/103309", - "id" : "103309" -} diff --git a/Documentation/Examples/RestCollectionIdentifierTruncate.generated b/Documentation/Examples/RestCollectionIdentifierTruncate.generated deleted file mode 100644 index 3620d4214b75..000000000000 --- a/Documentation/Examples/RestCollectionIdentifierTruncate.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/truncate - 
-HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/truncate -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "status" : 3, - "name" : "products", - "type" : 2, - "isSystem" : false, - "globallyUniqueId" : "h8B2B671BCFD0/103319", - "id" : "103319" -} diff --git a/Documentation/Examples/RestCollectionIdentifierUnload.generated b/Documentation/Examples/RestCollectionIdentifierUnload.generated deleted file mode 100644 index 406d837705ab..000000000000 --- a/Documentation/Examples/RestCollectionIdentifierUnload.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/collection/products/unload - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/unload -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "status" : 4, - "name" : "products", - "type" : 2, - "isSystem" : false, - "globallyUniqueId" : "h8B2B671BCFD0/103332", - "id" : "103332" -} diff --git a/Documentation/Examples/RestCollectionRotateNoJournal_mmfiles.generated b/Documentation/Examples/RestCollectionRotateNoJournal_mmfiles.generated deleted file mode 100644 index e6e2cd7ef5ff..000000000000 --- a/Documentation/Examples/RestCollectionRotateNoJournal_mmfiles.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF -{ -} -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "no journal", - "code" : 400, - "errorNum" : 1105 -} diff --git a/Documentation/Examples/RestCollectionRotate_mmfiles.generated b/Documentation/Examples/RestCollectionRotate_mmfiles.generated deleted file mode 100644 index cbab8000c180..000000000000 --- a/Documentation/Examples/RestCollectionRotate_mmfiles.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF -{ -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -location: /_api/collection/products/rotate -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : true -} diff --git a/Documentation/Examples/RestCreateUser.generated b/Documentation/Examples/RestCreateUser.generated deleted file mode 100644 index ac7e671a5486..000000000000 --- a/Documentation/Examples/RestCreateUser.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user <<EOF -{ - "user" : "admin@example", - "passwd" : "secure" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "user" : "admin@example", - "active" : true, - "extra" : { - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorCreateCursorForLimitReturn.generated b/Documentation/Examples/RestCursorCreateCursorForLimitReturn.generated deleted file mode 100644 index 6c91e6899e51..000000000000 --- a/Documentation/Examples/RestCursorCreateCursorForLimitReturn.generated +++ /dev/null @@ -1,47 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR p IN products LIMIT 5 RETURN p", - "count" : true, - 
"batchSize" : 2 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "103394", - "_id" : "products/103394", - "_rev" : "_YOn1M2q--D", - "hello2" : "world1" - }, - { - "_key" : "103403", - "_id" : "products/103403", - "_rev" : "_YOn1M2u--D", - "hello5" : "world1" - } - ], - "hasMore" : true, - "id" : "103406", - "count" : 5, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 5, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00016999244689941406, - "peakMemoryUsage" : 18120 - }, - "warnings" : [ ] - }, - "cached" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorCreateCursorForLimitReturnSingle.generated b/Documentation/Examples/RestCursorCreateCursorForLimitReturnSingle.generated deleted file mode 100644 index 81a5a3421bf1..000000000000 --- a/Documentation/Examples/RestCursorCreateCursorForLimitReturnSingle.generated +++ /dev/null @@ -1,46 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR p IN products LIMIT 2 RETURN p", - "count" : true, - "batchSize" : 2 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "103419", - "_id" : "products/103419", - "_rev" : "_YOn1M3u--_", - "hello1" : "world1" - }, - { - "_key" : "103423", - "_id" : "products/103423", - "_rev" : "_YOn1M3u--B", - "hello2" : "world1" - } - ], - "hasMore" : false, - "count" : 2, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 2, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.0001518726348876953, - "peakMemoryUsage" : 18072 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorCreateCursorMissingBody.generated b/Documentation/Examples/RestCursorCreateCursorMissingBody.generated deleted file mode 100644 index 839f4ab473ab..000000000000 --- a/Documentation/Examples/RestCursorCreateCursorMissingBody.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --dump - http://localhost:8529/_api/cursor - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "query is empty", - "code" : 400, - "errorNum" : 1502 -} diff --git a/Documentation/Examples/RestCursorCreateCursorOption.generated b/Documentation/Examples/RestCursorCreateCursorOption.generated deleted file mode 100644 index a39f6d3ab0c0..000000000000 --- a/Documentation/Examples/RestCursorCreateCursorOption.generated +++ /dev/null @@ -1,47 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR i IN 1..1000 FILTER i > 500 LIMIT 10 RETURN i", - "count" : true, - "options" : { - "fullCount" : true - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - 501, - 502, - 503, - 504, - 505, - 506, - 507, - 508, - 509, - 510 - ], - "hasMore" : false, - "count" : 10, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 500, - "httpRequests" : 
0, - "fullCount" : 500, - "executionTime" : 0.0007300376892089844, - "peakMemoryUsage" : 147416 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorCreateCursorUnknownCollection.generated b/Documentation/Examples/RestCursorCreateCursorUnknownCollection.generated deleted file mode 100644 index 82f14959eea4..000000000000 --- a/Documentation/Examples/RestCursorCreateCursorUnknownCollection.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR u IN unknowncoll LIMIT 2 RETURN u", - "count" : true, - "batchSize" : 2 -} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "AQL: collection or view not found: unknowncoll (while parsing)", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestCursorDelete.generated b/Documentation/Examples/RestCursorDelete.generated deleted file mode 100644 index eec3a60f2c73..000000000000 --- a/Documentation/Examples/RestCursorDelete.generated +++ /dev/null @@ -1,49 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR p IN products LIMIT 5 RETURN p", - "count" : true, - "batchSize" : 2 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "103456", - "_id" : "products/103456", - "_rev" : "_YOn1M5---H", - "hello5" : "world1" - }, - { - "_key" : "103443", - "_id" : "products/103443", - "_rev" : "_YOn1M5---_", - "hello1" : "world1" - } - ], - "hasMore" : true, - "id" : "103459", - "count" : 5, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 5, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00012540817260742188, - "peakMemoryUsage" : 18120 - }, - "warnings" : [ ] - }, - "cached" : false, - "error" : false, - "code" : 201 -} -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/cursor/103459 - diff --git a/Documentation/Examples/RestCursorDeleteIgnore.generated b/Documentation/Examples/RestCursorDeleteIgnore.generated deleted file mode 100644 index a18903417ee8..000000000000 --- a/Documentation/Examples/RestCursorDeleteIgnore.generated +++ /dev/null @@ -1,30 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "REMOVE 'bar' IN products OPTIONS { ignoreErrors: true }" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ ], - "hasMore" : false, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 1, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00013828277587890625, - "peakMemoryUsage" : 1944 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorDeleteQuery.generated b/Documentation/Examples/RestCursorDeleteQuery.generated deleted file mode 100644 index c94be30bb2ee..000000000000 --- a/Documentation/Examples/RestCursorDeleteQuery.generated +++ /dev/null @@ -1,30 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- 
--dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR p IN products REMOVE p IN products" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ ], - "hasMore" : false, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 2, - "writesIgnored" : 0, - "scannedFull" : 2, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.0001308917999267578, - "peakMemoryUsage" : 18040 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorDeleteQueryFail.generated b/Documentation/Examples/RestCursorDeleteQueryFail.generated deleted file mode 100644 index 239cd83f91bb..000000000000 --- a/Documentation/Examples/RestCursorDeleteQueryFail.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "REMOVE 'foo' IN products" -} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "AQL: document not found (while executing)", - "code" : 404, - "errorNum" : 1202 -} diff --git a/Documentation/Examples/RestCursorForLimitReturnCont.generated b/Documentation/Examples/RestCursorForLimitReturnCont.generated deleted file mode 100644 index ab083c93b7b9..000000000000 --- a/Documentation/Examples/RestCursorForLimitReturnCont.generated +++ /dev/null @@ -1,49 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR p IN products LIMIT 5 RETURN p", - "count" : true, - "batchSize" : 2 -} -EOF - -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/cursor/103542 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "103539", - "_id" : "products/103539", - "_rev" : "_YOn1M9G--B", - "hello5" : "world1" - }, - { - "_key" : "103526", - "_id" : "products/103526", - "_rev" : "_YOn1M9C--B", - "hello1" : "world1" - } - ], - "hasMore" : true, - "id" : "103542", - "count" : 5, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 5, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00013566017150878906, - "peakMemoryUsage" : 18120 - }, - "warnings" : [ ] - }, - "cached" : false, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestCursorInvalidCursorIdentifier.generated b/Documentation/Examples/RestCursorInvalidCursorIdentifier.generated deleted file mode 100644 index b179c7e565e5..000000000000 --- a/Documentation/Examples/RestCursorInvalidCursorIdentifier.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/cursor/123123 - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "cursor not found", - "code" : 404, - "errorNum" : 1600 -} diff --git a/Documentation/Examples/RestCursorMissingCursorIdentifier.generated b/Documentation/Examples/RestCursorMissingCursorIdentifier.generated deleted file mode 100644 index 17fa7838ae1c..000000000000 --- a/Documentation/Examples/RestCursorMissingCursorIdentifier.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X PUT --header 
'accept: application/json' --dump - http://localhost:8529/_api/cursor - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "expecting PUT /_api/cursor/<cursor-id>", - "code" : 400, - "errorNum" : 400 -} diff --git a/Documentation/Examples/RestCursorOptimizerRules.generated b/Documentation/Examples/RestCursorOptimizerRules.generated deleted file mode 100644 index da71bb388496..000000000000 --- a/Documentation/Examples/RestCursorOptimizerRules.generated +++ /dev/null @@ -1,52 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR i IN 1..10 LET a = 1 LET b = 2 FILTER a + b == 3 RETURN i", - "count" : true, - "options" : { - "maxPlans" : 1, - "optimizer" : { - "rules" : [ - "-all", - "+remove-unnecessary-filters" - ] - } - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10 - ], - "hasMore" : false, - "count" : 10, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.0001652240753173828, - "peakMemoryUsage" : 82856 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestCursorProfileQuery.generated b/Documentation/Examples/RestCursorProfileQuery.generated deleted file mode 100644 index a816c3fb5188..000000000000 --- a/Documentation/Examples/RestCursorProfileQuery.generated +++ /dev/null @@ -1,218 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", - "count" : true, - "options" : { - "profile" : 2 - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - 1 - ], - "hasMore" : false, - "count" : 1, - "cached" : false, - "extra" : { - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 1 - ], - "id" : 4, - "estimatedCost" : 2, - "estimatedNrItems" : 1, - "expression" : { - "type" : "value", - "typeID" : 40, - "value" : 1, - "vType" : "int", - "vTypeID" : 2 - }, - "outVariable" : { - "id" : 3, - "name" : "2" - }, - "canThrow" : false, - "expressionType" : "json" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 4 - ], - "id" : 2, - "estimatedCost" : 3, - "estimatedNrItems" : 1, - "expression" : { - "type" : "function call", - "typeID" : 47, - "name" : "SLEEP", - "subNodes" : [ - { - "type" : "array", - "typeID" : 41, - "subNodes" : [ - { - "type" : "value", - "typeID" : 40, - "value" : 0.25, - "vType" : "double", - "vTypeID" : 3 - } - ] - } - ] - }, - "outVariable" : { - "id" : 0, - "name" : "s" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 4, - "estimatedNrItems" : 1, - "expression" : { - "type" : "function call", - "typeID" : 47, - "name" : "SLEEP", - "subNodes" : [ - { - "type" : "array", - "typeID" : 41, - "subNodes" : [ - { - "type" : "value", - "typeID" : 40, - "value" : 0.5, - "vType" : "double", - 
"vTypeID" : 3 - } - ] - } - ] - }, - "outVariable" : { - "id" : 1, - "name" : "t" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 3 - ], - "id" : 5, - "estimatedCost" : 5, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 3, - "name" : "2" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up" - ], - "collections" : [ ], - "variables" : [ - { - "id" : 3, - "name" : "2" - }, - { - "id" : 1, - "name" : "t" - }, - { - "id" : 0, - "name" : "s" - } - ], - "estimatedCost" : 5, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - }, - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 0, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.7833671569824219, - "peakMemoryUsage" : 2312, - "nodes" : [ - { - "id" : 1, - "calls" : 1, - "items" : 1, - "runtime" : 0.000001430511474609375 - }, - { - "id" : 2, - "calls" : 1, - "items" : 1, - "runtime" : 0.2710738182067871 - }, - { - "id" : 3, - "calls" : 1, - "items" : 1, - "runtime" : 0.7831518650054932 - }, - { - "id" : 4, - "calls" : 1, - "items" : 1, - "runtime" : 0.0000030994415283203125 - }, - { - "id" : 5, - "calls" : 1, - "items" : 1, - "runtime" : 0.7831592559814453 - } - ] - }, - "warnings" : [ ], - "profile" : { - "initializing" : 7.152557373046875e-7, - "parsing" : 0.0000171661376953125, - "optimizing ast" : 0.0000019073486328125, - "loading collections" : 0.000001430511474609375, - "instantiating plan" : 0.0000069141387939453125, - "optimizing plan" : 0.00004315376281738281, - "executing" : 0.7831752300262451, - "finalizing" : 0.0001163482666015625 - } - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestDatabaseCreate.generated b/Documentation/Examples/RestDatabaseCreate.generated deleted file mode 100644 index dc1ed70fa9a8..000000000000 --- a/Documentation/Examples/RestDatabaseCreate.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/database <<EOF -{ - "name" : "example" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 201, - "result" : true -} diff --git a/Documentation/Examples/RestDatabaseCreateUsers.generated b/Documentation/Examples/RestDatabaseCreateUsers.generated deleted file mode 100644 index d321a83a3836..000000000000 --- a/Documentation/Examples/RestDatabaseCreateUsers.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/database <<EOF -{ - "name" : "mydb", - "users" : [ - { - "username" : "admin", - "passwd" : "secret", - "active" : true - }, - { - "username" : "tester", - "passwd" : "test001", - "active" : false - } - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 201, - "result" : true -} diff --git a/Documentation/Examples/RestDatabaseDrop.generated b/Documentation/Examples/RestDatabaseDrop.generated deleted file mode 100644 index 9358be845dcf..000000000000 --- a/Documentation/Examples/RestDatabaseDrop.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/database/example - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff 
- -{ - "error" : false, - "code" : 200, - "result" : true -} diff --git a/Documentation/Examples/RestDatabaseGet.generated b/Documentation/Examples/RestDatabaseGet.generated deleted file mode 100644 index 30b333bcdac4..000000000000 --- a/Documentation/Examples/RestDatabaseGet.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/database - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ - "_system" - ] -} diff --git a/Documentation/Examples/RestDatabaseGetInfo.generated b/Documentation/Examples/RestDatabaseGetInfo.generated deleted file mode 100644 index 21f4023ab686..000000000000 --- a/Documentation/Examples/RestDatabaseGetInfo.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/database/current - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : { - "name" : "_system", - "id" : "1", - "path" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/databases/database-1", - "isSystem" : true - } -} diff --git a/Documentation/Examples/RestDatabaseGetUser.generated b/Documentation/Examples/RestDatabaseGetUser.generated deleted file mode 100644 index 95c4658a16eb..000000000000 --- a/Documentation/Examples/RestDatabaseGetUser.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/database/user - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ - "_system" - ] -} diff --git a/Documentation/Examples/RestDeleteUser.generated b/Documentation/Examples/RestDeleteUser.generated deleted file mode 100644 index e8e33ada3ae1..000000000000 --- a/Documentation/Examples/RestDeleteUser.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user/userToDelete@myapp <<EOF -{ -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202 -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocument.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocument.generated deleted file mode 100644 index 0408deba8882..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocument.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103685 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1O4q--_" -location: /_db/_system/_api/document/products/103685 -x-content-type-options: nosniff - -{ - "_id" : "products/103685", - "_key" : "103685", - "_rev" : "_YOn1O4q--_" -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOther.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOther.generated deleted file mode 100644 index 9fba0c7bbfa5..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOther.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X DELETE --header 'If-Match: "_YOn1O52--D"' --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103702 - -HTTP/1.1 Precondition Failed 
-content-type: application/json; charset=utf-8 -etag: "_YOn1O52--B" -x-content-type-options: nosniff - -{ - "error" : true, - "code" : 412, - "errorNum" : 1200, - "errorMessage" : "precondition failed", - "_id" : "products/103702", - "_key" : "103702", - "_rev" : "_YOn1O52--B" -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOtherMulti.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOtherMulti.generated deleted file mode 100644 index cf99b54d58e0..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocumentIfMatchOtherMulti.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X DELETE --header 'If-Match: "_YOn1O66--B"' --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103721 - -HTTP/1.1 Precondition Failed -content-type: application/json; charset=utf-8 -etag: "_YOn1O66--_" -x-content-type-options: nosniff - -{ - "error" : true, - "code" : 412, - "errorNum" : 1200, - "errorMessage" : "precondition failed", - "_id" : "products/103721", - "_key" : "103721", - "_rev" : "_YOn1O66--_" -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocumentMulti.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocumentMulti.generated deleted file mode 100644 index 52e257ed9a32..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocumentMulti.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103740 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1O76--B" -location: /_db/_system/_api/document/products/103740 -x-content-type-options: nosniff - -{ - "_id" : "products/103740", - "_key" : "103740", - "_rev" : "_YOn1O76--B" -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandle.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandle.generated deleted file mode 100644 index 799aadcabb9f..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandle.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103757 - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "document not found", - "code" : 404, - "errorNum" : 1202 -} diff --git a/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandleMulti.generated b/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandleMulti.generated deleted file mode 100644 index 405b92a95ade..000000000000 --- a/Documentation/Examples/RestDocumentHandlerDeleteDocumentUnknownHandleMulti.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103775 - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "document not found", - "code" : 404, - "errorNum" : 1202 -} diff --git a/Documentation/Examples/RestDocumentHandlerPatchDocument.generated b/Documentation/Examples/RestDocumentHandlerPatchDocument.generated deleted file mode 100644 index f56bb78acbfb..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPatchDocument.generated +++ /dev/null @@ -1,102 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' 
--data-binary @- --dump - http://localhost:8529/_api/document/products/103793 <<EOF -{ - "hello" : "world" -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PAq--_" -location: /_db/_system/_api/document/products/103793 -x-content-type-options: nosniff - -{ - "_id" : "products/103793", - "_key" : "103793", - "_rev" : "_YOn1PAq--_", - "_oldRev" : "_YOn1PAm--B" -} -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/103793 <<EOF -{ - "numbers" : { - "one" : 1, - "two" : 2, - "three" : 3, - "empty" : null - } -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PAu--_" -location: /_db/_system/_api/document/products/103793 -x-content-type-options: nosniff - -{ - "_id" : "products/103793", - "_key" : "103793", - "_rev" : "_YOn1PAu--_", - "_oldRev" : "_YOn1PAq--_" -} -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103793 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PAu--_" -x-content-type-options: nosniff - -{ - "_key" : "103793", - "_id" : "products/103793", - "_rev" : "_YOn1PAu--_", - "one" : "world", - "hello" : "world", - "numbers" : { - "one" : 1, - "two" : 2, - "three" : 3, - "empty" : null - } -} -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/103793?keepNull=false <<EOF -{ - "hello" : null, - "numbers" : { - "four" : 4 - } -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PA2--_" -location: /_db/_system/_api/document/products/103793 -x-content-type-options: nosniff - -{ - "_id" : "products/103793", - "_key" : "103793", - "_rev" : "_YOn1PA2--_", - "_oldRev" : "_YOn1PAu--_" -} -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103793 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PA2--_" -x-content-type-options: nosniff - -{ - "_key" : "103793", - "_id" : "products/103793", - "_rev" : "_YOn1PA2--_", - "one" : "world", - "numbers" : { - "empty" : null, - "one" : 1, - "three" : 3, - "two" : 2, - "four" : 4 - } -} diff --git a/Documentation/Examples/RestDocumentHandlerPatchDocumentMerge.generated b/Documentation/Examples/RestDocumentHandlerPatchDocumentMerge.generated deleted file mode 100644 index 4e4343c01d12..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPatchDocumentMerge.generated +++ /dev/null @@ -1,80 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103816 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PBq--B" -x-content-type-options: nosniff - -{ - "_key" : "103816", - "_id" : "products/103816", - "_rev" : "_YOn1PBq--B", - "inhabitants" : { - "china" : 1366980000, - "india" : 1263590000, - "usa" : 319220000 - } -} -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/103816?mergeObjects=true <<EOF -{ - "inhabitants" : { - "indonesia" : 252164800, - "brazil" : 203553000 - } -} -EOF - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103816 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PBy--_" -x-content-type-options: nosniff - -{ - "_key" : "103816", - "_id" : "products/103816", - "_rev" : 
"_YOn1PBy--_", - "inhabitants" : { - "china" : 1366980000, - "india" : 1263590000, - "usa" : 319220000, - "indonesia" : 252164800, - "brazil" : 203553000 - } -} -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/103816?mergeObjects=false <<EOF -{ - "inhabitants" : { - "pakistan" : 188346000 - } -} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PB2--_" -location: /_db/_system/_api/document/products/103816 -x-content-type-options: nosniff - -{ - "_id" : "products/103816", - "_key" : "103816", - "_rev" : "_YOn1PB2--_", - "_oldRev" : "_YOn1PBy--_" -} -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103816 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PB2--_" -x-content-type-options: nosniff - -{ - "_key" : "103816", - "_id" : "products/103816", - "_rev" : "_YOn1PB2--_", - "inhabitants" : { - "pakistan" : 188346000 - } -} diff --git a/Documentation/Examples/RestDocumentHandlerPostAccept1.generated b/Documentation/Examples/RestDocumentHandlerPostAccept1.generated deleted file mode 100644 index e37ed855ac44..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostAccept1.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF -{ "Hello": "World" } -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PCu--_" -location: /_db/_system/_api/document/products/103838 -x-content-type-options: nosniff - -{ - "_id" : "products/103838", - "_key" : "103838", - "_rev" : "_YOn1PCu--_" -} diff --git a/Documentation/Examples/RestDocumentHandlerPostBadJson1.generated b/Documentation/Examples/RestDocumentHandlerPostBadJson1.generated deleted file mode 100644 index 9ace9636b500..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostBadJson1.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF -{ 1: "World" } -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "VPackError error: Expecting '\"' or '}'", - "code" : 400, - "errorNum" : 600 -} diff --git a/Documentation/Examples/RestDocumentHandlerPostCreate1.generated b/Documentation/Examples/RestDocumentHandlerPostCreate1.generated deleted file mode 100644 index f422a0ceb7c7..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostCreate1.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF -{ "Hello": "World" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -etag: "_YOn1PEy--_" -location: /_db/_system/_api/document/products/103864 -x-content-type-options: nosniff - -{ - "_id" : "products/103864", - "_key" : "103864", - "_rev" : "_YOn1PEy--_" -} diff --git a/Documentation/Examples/RestDocumentHandlerPostMulti1.generated b/Documentation/Examples/RestDocumentHandlerPostMulti1.generated deleted file mode 100644 index de7b0eaf0fc8..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostMulti1.generated +++ /dev/null @@ -1,25 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - 
http://localhost:8529/_api/document/products <<EOF -[{"Hello":"Earth"}, {"Hello":"Venus"}, {"Hello":"Mars"}] -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - { - "_id" : "products/103879", - "_key" : "103879", - "_rev" : "_YOn1PGW--B" - }, - { - "_id" : "products/103883", - "_key" : "103883", - "_rev" : "_YOn1PGW--D" - }, - { - "_id" : "products/103885", - "_key" : "103885", - "_rev" : "_YOn1PGW--F" - } -] diff --git a/Documentation/Examples/RestDocumentHandlerPostMulti2.generated b/Documentation/Examples/RestDocumentHandlerPostMulti2.generated deleted file mode 100644 index ad4117bfe9ba..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostMulti2.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products?returnNew=true <<EOF -{"Hello":"World"} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PHa--B" -location: /_db/_system/_api/document/products/103900 -x-content-type-options: nosniff - -{ - "_id" : "products/103900", - "_key" : "103900", - "_rev" : "_YOn1PHa--B", - "new" : { - "_key" : "103900", - "_id" : "products/103900", - "_rev" : "_YOn1PHa--B", - "Hello" : "World" - } -} diff --git a/Documentation/Examples/RestDocumentHandlerPostOverwrite.generated b/Documentation/Examples/RestDocumentHandlerPostOverwrite.generated deleted file mode 100644 index 6a013200bcb2..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostOverwrite.generated +++ /dev/null @@ -1,31 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF -{ "Hello": "World", "_key" : "lock" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -etag: "_YOn1PIe--B" -location: /_db/_system/_api/document/products/lock -x-content-type-options: nosniff - -{ - "_id" : "products/lock", - "_key" : "lock", - "_rev" : "_YOn1PIe--B" -} -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products?overwrite=true <<EOF -{ "Hello": "Universe", "_key" : "lock" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -etag: "_YOn1PI6--B" -location: /_db/_system/_api/document/products/lock -x-content-type-options: nosniff - -{ - "_id" : "products/lock", - "_key" : "lock", - "_rev" : "_YOn1PI6--B", - "_oldRev" : "_YOn1PIe--B" -} diff --git a/Documentation/Examples/RestDocumentHandlerPostUnknownCollection1.generated b/Documentation/Examples/RestDocumentHandlerPostUnknownCollection1.generated deleted file mode 100644 index ec5c06bbdac2..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostUnknownCollection1.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF -{ "Hello": "World" } -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: products", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestDocumentHandlerPostWait1.generated b/Documentation/Examples/RestDocumentHandlerPostWait1.generated deleted file mode 100644 index 4a4e691dc805..000000000000 --- a/Documentation/Examples/RestDocumentHandlerPostWait1.generated +++ /dev/null @@ -1,15 +0,0 @@ 
-shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products?waitForSync=true <<EOF -{ "Hello": "World" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -etag: "_YOn1PKK--B" -location: /_db/_system/_api/document/products/103936 -x-content-type-options: nosniff - -{ - "_id" : "products/103936", - "_key" : "103936", - "_rev" : "_YOn1PKK--B" -} diff --git a/Documentation/Examples/RestDocumentHandlerReadDocument.generated b/Documentation/Examples/RestDocumentHandlerReadDocument.generated deleted file mode 100644 index 4a8178c36f1b..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocument.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/103951 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -etag: "_YOn1PLW--B" -x-content-type-options: nosniff - -{ - "_key" : "103951", - "_id" : "products/103951", - "_rev" : "_YOn1PLW--B", - "hello" : "world" -} diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentAllCollectionDoesNotExist.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentAllCollectionDoesNotExist.generated deleted file mode 100644 index 4fa871dcd3aa..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentAllCollectionDoesNotExist.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/doesnotexist - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "expecting GET /_api/document/<document-handle>", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentAllKey.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentAllKey.generated deleted file mode 100644 index 042d2e5c3b42..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentAllKey.generated +++ /dev/null @@ -1,35 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/all-keys <<EOF -{ - "collection" : "products", - "type" : "id" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - "products/103975", - "products/103972", - "products/103968" - ], - "hasMore" : false, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 3, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.0001857280731201172, - "peakMemoryUsage" : 34040 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentAllPath.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentAllPath.generated deleted file mode 100644 index 9046ea57c0d6..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentAllPath.generated +++ /dev/null @@ -1,34 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/all-keys <<EOF -{ - "collection" : "products" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - "/_db/_system/_api/document/products/103997", - "/_db/_system/_api/document/products/103994", - 
"/_db/_system/_api/document/products/103990" - ], - "hasMore" : false, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 3, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00018358230590820312, - "peakMemoryUsage" : 34208 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentHead.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentHead.generated deleted file mode 100644 index 7eb2e8d21351..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentHead.generated +++ /dev/null @@ -1,2 +0,0 @@ -shell> curl -X HEAD --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/104012 - diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentIfNoneMatch.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentIfNoneMatch.generated deleted file mode 100644 index 8374c21fcc57..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentIfNoneMatch.generated +++ /dev/null @@ -1,2 +0,0 @@ -shell> curl --header 'If-None-Match: "_YOn1PQK--B"' --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/104028 - diff --git a/Documentation/Examples/RestDocumentHandlerReadDocumentUnknownHandle.generated b/Documentation/Examples/RestDocumentHandlerReadDocumentUnknownHandle.generated deleted file mode 100644 index 4e12fef286e8..000000000000 --- a/Documentation/Examples/RestDocumentHandlerReadDocumentUnknownHandle.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/document/products/unknownhandle - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: products", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestDocumentHandlerUpdateDocument.generated b/Documentation/Examples/RestDocumentHandlerUpdateDocument.generated deleted file mode 100644 index d3430f5eef73..000000000000 --- a/Documentation/Examples/RestDocumentHandlerUpdateDocument.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/104046 <<EOF -{"Hello": "you"} -EOF - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -etag: "_YOn1PRa--D" -location: /_db/_system/_api/document/products/104046 -x-content-type-options: nosniff - -{ - "_id" : "products/104046", - "_key" : "104046", - "_rev" : "_YOn1PRa--D", - "_oldRev" : "_YOn1PRa--B" -} diff --git a/Documentation/Examples/RestDocumentHandlerUpdateDocumentIfMatchOther.generated b/Documentation/Examples/RestDocumentHandlerUpdateDocumentIfMatchOther.generated deleted file mode 100644 index 4410f7f626eb..000000000000 --- a/Documentation/Examples/RestDocumentHandlerUpdateDocumentIfMatchOther.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X PUT --header 'If-Match: "_YOn1PSi--B"' --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/104063 <<EOF -{"other":"content"} -EOF - -HTTP/1.1 Precondition Failed -content-type: application/json; charset=utf-8 -etag: "_YOn1PSi--_" -x-content-type-options: nosniff - -{ - "error" : true, - "code" : 412, - "errorNum" : 1200, - "errorMessage" : "precondition failed", - "_id" : 
"products/104063", - "_key" : "104063", - "_rev" : "_YOn1PSi--_" -} diff --git a/Documentation/Examples/RestDocumentHandlerUpdateDocumentUnknownHandle.generated b/Documentation/Examples/RestDocumentHandlerUpdateDocumentUnknownHandle.generated deleted file mode 100644 index d2347a7c0183..000000000000 --- a/Documentation/Examples/RestDocumentHandlerUpdateDocumentUnknownHandle.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/document/products/104082 <<EOF -{} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "document not found", - "code" : 404, - "errorNum" : 1202 -} diff --git a/Documentation/Examples/RestEdgesReadEdgesAny.generated b/Documentation/Examples/RestEdgesReadEdgesAny.generated deleted file mode 100644 index 3e11f2ac89e7..000000000000 --- a/Documentation/Examples/RestEdgesReadEdgesAny.generated +++ /dev/null @@ -1,40 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "edges" : [ - { - "_key" : "6", - "_id" : "edges/6", - "_from" : "vertices/2", - "_to" : "vertices/1", - "_rev" : "_YOn1PWK--B", - "$label" : "v2 -> v1" - }, - { - "_key" : "7", - "_id" : "edges/7", - "_from" : "vertices/4", - "_to" : "vertices/1", - "_rev" : "_YOn1PWK--D", - "$label" : "v4 -> v1" - }, - { - "_key" : "5", - "_id" : "edges/5", - "_from" : "vertices/1", - "_to" : "vertices/3", - "_rev" : "_YOn1PWK--_", - "$label" : "v1 -> v3" - } - ], - "error" : false, - "code" : 200, - "stats" : { - "scannedIndex" : 3, - "filtered" : 0 - } -} diff --git a/Documentation/Examples/RestEdgesReadEdgesIn.generated b/Documentation/Examples/RestEdgesReadEdgesIn.generated deleted file mode 100644 index a62267da4f5f..000000000000 --- a/Documentation/Examples/RestEdgesReadEdgesIn.generated +++ /dev/null @@ -1,32 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=in - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "edges" : [ - { - "_key" : "6", - "_id" : "edges/6", - "_from" : "vertices/2", - "_to" : "vertices/1", - "_rev" : "_YOn1PYy--H", - "$label" : "v2 -> v1" - }, - { - "_key" : "7", - "_id" : "edges/7", - "_from" : "vertices/4", - "_to" : "vertices/1", - "_rev" : "_YOn1PYy--J", - "$label" : "v4 -> v1" - } - ], - "error" : false, - "code" : 200, - "stats" : { - "scannedIndex" : 2, - "filtered" : 0 - } -} diff --git a/Documentation/Examples/RestEdgesReadEdgesOut.generated b/Documentation/Examples/RestEdgesReadEdgesOut.generated deleted file mode 100644 index 8068f57b9d0b..000000000000 --- a/Documentation/Examples/RestEdgesReadEdgesOut.generated +++ /dev/null @@ -1,24 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=out - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "edges" : [ - { - "_key" : "5", - "_id" : "edges/5", - "_from" : "vertices/1", - "_to" : "vertices/3", - "_rev" : "_YOn1Pba--F", - "$label" : "v1 -> v3" - } - ], - "error" : false, - "code" : 200, - "stats" : { - "scannedIndex" : 1, - "filtered" : 0 - } -} diff --git a/Documentation/Examples/RestEndpointGet.generated 
b/Documentation/Examples/RestEndpointGet.generated deleted file mode 100644 index cc29b20323f5..000000000000 --- a/Documentation/Examples/RestEndpointGet.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/endpoint - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - { - "endpoint" : "http://127.0.0.1:18836" - } -] diff --git a/Documentation/Examples/RestEngine_mmfiles.generated b/Documentation/Examples/RestEngine_mmfiles.generated deleted file mode 100644 index d7e309e207e7..000000000000 --- a/Documentation/Examples/RestEngine_mmfiles.generated +++ /dev/null @@ -1,26 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/engine - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "name" : "mmfiles", - "supports" : { - "dfdb" : true, - "indexes" : [ - "primary", - "edge", - "hash", - "skiplist", - "ttl", - "persistent", - "geo", - "fulltext" - ], - "aliases" : { - "indexes" : { - } - } - } -} diff --git a/Documentation/Examples/RestEngine_rocksdb.generated b/Documentation/Examples/RestEngine_rocksdb.generated deleted file mode 100644 index 8854662b4af4..000000000000 --- a/Documentation/Examples/RestEngine_rocksdb.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/engine - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "name" : "rocksdb", - "supports" : { - "dfdb" : false, - "indexes" : [ - "primary", - "edge", - "hash", - "skiplist", - "ttl", - "persistent", - "geo", - "fulltext" - ], - "aliases" : { - "indexes" : { - "skiplist" : "persistent", - "hash" : "persistent" - } - } - } -} diff --git a/Documentation/Examples/RestExplainAllPlans.generated b/Documentation/Examples/RestExplainAllPlans.generated deleted file mode 100644 index cc613fee5c2b..000000000000 --- a/Documentation/Examples/RestExplainAllPlans.generated +++ /dev/null @@ -1,153 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR p IN products FILTER p.id == 25 RETURN p", - "options" : { - "allPlans" : true - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plans" : [ - { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 6, - "estimatedCost" : 1.99, - "estimatedNrItems" : 1, - "outVariable" : { - "id" : 0, - "name" : "p" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "products", - "satellite" : false, - "needsGatherNodeSort" : false, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "104216", - "type" : "hash", - "fields" : [ - "id" - ], - "selectivityEstimate" : 1, - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "id", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - 
}, - { - "type" : "value", - "typeID" : 40, - "value" : 25, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 6 - ], - "id" : 5, - "estimatedCost" : 2.99, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 0, - "name" : "p" - }, - "count" : true - } - ], - "rules" : [ - "use-indexes", - "remove-filter-covered-by-index", - "remove-unnecessary-calculations-2" - ], - "collections" : [ - { - "name" : "products", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "p" - } - ], - "estimatedCost" : 2.99, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - } - ], - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestExplainEmpty.generated b/Documentation/Examples/RestExplainEmpty.generated deleted file mode 100644 index 3a5021829aa3..000000000000 --- a/Documentation/Examples/RestExplainEmpty.generated +++ /dev/null @@ -1,137 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ "query" : "FOR i IN [ 1, 2, 3 ] FILTER 1 == 2 RETURN i" } -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 2, - "estimatedNrItems" : 1, - "expression" : { - "type" : "array", - "typeID" : 41, - "subNodes" : [ - { - "type" : "value", - "typeID" : 40, - "value" : 1, - "vType" : "int", - "vTypeID" : 2 - }, - { - "type" : "value", - "typeID" : 40, - "value" : 2, - "vType" : "int", - "vTypeID" : 2 - }, - { - "type" : "value", - "typeID" : 40, - "value" : 3, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "1" - }, - "canThrow" : false, - "expressionType" : "json" - }, - { - "type" : "NoResultsNode", - "dependencies" : [ - 2 - ], - "id" : 7, - "estimatedCost" : 0.5, - "estimatedNrItems" : 0 - }, - { - "type" : "EnumerateListNode", - "dependencies" : [ - 7 - ], - "id" : 3, - "estimatedCost" : 0.5, - "estimatedNrItems" : 0, - "inVariable" : { - "id" : 2, - "name" : "1" - }, - "outVariable" : { - "id" : 0, - "name" : "i" - } - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 3 - ], - "id" : 6, - "estimatedCost" : 0.5, - "estimatedNrItems" : 0, - "inVariable" : { - "id" : 0, - "name" : "i" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-filters-up", - "remove-unnecessary-filters", - "remove-unnecessary-calculations" - ], - "collections" : [ ], - "variables" : [ - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 0.5, - "estimatedNrItems" : 0, - "initialize" : true, - "isModificationQuery" : false - }, - "cacheable" : true, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestExplainInvalid.generated b/Documentation/Examples/RestExplainInvalid.generated deleted file mode 
100644 index c7ee5d3183a9..000000000000 --- a/Documentation/Examples/RestExplainInvalid.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR p IN products FILTER p.id == @id LIMIT 2 RETURN p.n" -} -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "no value specified for declared bind parameter 'id' (while parsing)", - "code" : 400, - "errorNum" : 1551 -} diff --git a/Documentation/Examples/RestExplainOptimizerRules.generated b/Documentation/Examples/RestExplainOptimizerRules.generated deleted file mode 100644 index 405eb838419e..000000000000 --- a/Documentation/Examples/RestExplainOptimizerRules.generated +++ /dev/null @@ -1,260 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 11, - "estimatedCost" : 4.321928094887362, - "estimatedNrItems" : 1, - "outVariable" : { - "id" : 0, - "name" : "p" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "products", - "satellite" : false, - "needsGatherNodeSort" : true, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "104254", - "type" : "skiplist", - "fields" : [ - "id" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - "type" : "n-ary or", - "typeID" : 63, - "subNodes" : [ - { - "type" : "n-ary and", - "typeID" : 62, - "subNodes" : [ - { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "id", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 4, - "vType" : "int", - "vTypeID" : 2 - } - ] - } - ] - } - ] - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 11 - ], - "id" : 4, - "estimatedCost" : 5.321928094887362, - "estimatedNrItems" : 1, - "expression" : { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "attribute access", - "typeID" : 35, - "name" : "id", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - { - "type" : "value", - "typeID" : 40, - "value" : 4, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "FilterNode", - "dependencies" : [ - 4 - ], - "id" : 5, - "estimatedCost" : 6.321928094887362, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 4, - "name" : "3" - } - }, - { - "type" : "LimitNode", - "dependencies" : [ - 5 - ], - "id" : 9, - "estimatedCost" : 7.321928094887362, - "estimatedNrItems" : 1, - "offset" : 0, - "limit" : 1, - 
"fullCount" : false - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 9 - ], - "id" : 6, - "estimatedCost" : 8.321928094887362, - "estimatedNrItems" : 1, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "name", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "name" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 6 - ], - "id" : 10, - "estimatedCost" : 9.321928094887362, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 2, - "name" : "name" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "remove-redundant-calculations", - "remove-unnecessary-calculations", - "move-calculations-up-2", - "use-indexes", - "use-index-for-sort", - "remove-unnecessary-calculations-2", - "move-calculations-down" - ], - "collections" : [ - { - "name" : "products", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "name" - }, - { - "id" : 1, - "name" : "a" - }, - { - "id" : 0, - "name" : "p" - } - ], - "estimatedCost" : 9.321928094887362, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - }, - "cacheable" : true, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestExplainOptions.generated b/Documentation/Examples/RestExplainOptions.generated deleted file mode 100644 index 0218553b2f13..000000000000 --- a/Documentation/Examples/RestExplainOptions.generated +++ /dev/null @@ -1,277 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name", - "options" : { - "maxNumberOfPlans" : 2, - "allPlans" : true, - "optimizer" : { - "rules" : [ - "-all", - "+use-index-for-sort", - "+use-index-range" - ] - } - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plans" : [ - { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "IndexNode", - "dependencies" : [ - 1 - ], - "id" : 11, - "estimatedCost" : 11, - "estimatedNrItems" : 10, - "outVariable" : { - "id" : 0, - "name" : "p" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "products", - "satellite" : false, - "needsGatherNodeSort" : true, - "indexCoversProjections" : false, - "indexes" : [ - { - "id" : "104300", - "type" : "skiplist", - "fields" : [ - "id" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "condition" : { - }, - "sorted" : true, - "ascending" : true, - "reverse" : false, - "evalFCalls" : true, - "fullRange" : false, - "limit" : 0 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 11 - ], - "id" : 3, - "estimatedCost" : 21, - "estimatedNrItems" : 10, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "id", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 1, - "name" : "a" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : 
"CalculationNode", - "dependencies" : [ - 3 - ], - "id" : 4, - "estimatedCost" : 31, - "estimatedNrItems" : 10, - "expression" : { - "type" : "compare ==", - "typeID" : 25, - "excludesNull" : false, - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "a", - "id" : 1 - }, - { - "type" : "value", - "typeID" : 40, - "value" : 4, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "FilterNode", - "dependencies" : [ - 4 - ], - "id" : 5, - "estimatedCost" : 41, - "estimatedNrItems" : 10, - "inVariable" : { - "id" : 4, - "name" : "3" - } - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 5 - ], - "id" : 6, - "estimatedCost" : 51, - "estimatedNrItems" : 10, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "name", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "name" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 6 - ], - "id" : 7, - "estimatedCost" : 61, - "estimatedNrItems" : 10, - "expression" : { - "type" : "attribute access", - "typeID" : 35, - "name" : "id", - "subNodes" : [ - { - "type" : "reference", - "typeID" : 45, - "name" : "p", - "id" : 0 - } - ] - }, - "outVariable" : { - "id" : 6, - "name" : "5" - }, - "canThrow" : false, - "expressionType" : "attribute" - }, - { - "type" : "LimitNode", - "dependencies" : [ - 7 - ], - "id" : 9, - "estimatedCost" : 62, - "estimatedNrItems" : 1, - "offset" : 0, - "limit" : 1, - "fullCount" : false - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 9 - ], - "id" : 10, - "estimatedCost" : 63, - "estimatedNrItems" : 1, - "inVariable" : { - "id" : 2, - "name" : "name" - }, - "count" : true - } - ], - "rules" : [ - "use-index-for-sort" - ], - "collections" : [ - { - "name" : "products", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 6, - "name" : "5" - }, - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "name" - }, - { - "id" : 1, - "name" : "a" - }, - { - "id" : 0, - "name" : "p" - } - ], - "estimatedCost" : 63, - "estimatedNrItems" : 1, - "initialize" : true, - "isModificationQuery" : false - } - ], - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 3, - "rulesSkipped" : 32, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestExplainValid.generated b/Documentation/Examples/RestExplainValid.generated deleted file mode 100644 index 135f51e69c8e..000000000000 --- a/Documentation/Examples/RestExplainValid.generated +++ /dev/null @@ -1,82 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR p IN products RETURN p" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "EnumerateCollectionNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 12, - "estimatedNrItems" : 10, - "random" : false, - "outVariable" : { - "id" : 0, - "name" : "p" - }, - "projections" : [ ], - "producesResult" : true, - "database" : "_system", - "collection" : "products", - "satellite" : false - }, - { - "type" : "ReturnNode", - 
"dependencies" : [ - 2 - ], - "id" : 3, - "estimatedCost" : 22, - "estimatedNrItems" : 10, - "inVariable" : { - "id" : 0, - "name" : "p" - }, - "count" : true - } - ], - "rules" : [ ], - "collections" : [ - { - "name" : "products", - "type" : "read" - } - ], - "variables" : [ - { - "id" : 0, - "name" : "p" - } - ], - "estimatedCost" : 22, - "estimatedNrItems" : 10, - "initialize" : true, - "isModificationQuery" : false - }, - "cacheable" : true, - "warnings" : [ ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestExplainWarning.generated b/Documentation/Examples/RestExplainWarning.generated deleted file mode 100644 index b631967b74ac..000000000000 --- a/Documentation/Examples/RestExplainWarning.generated +++ /dev/null @@ -1,148 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF -{ - "query" : "FOR i IN 1..10 RETURN 1 / 0" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "plan" : { - "nodes" : [ - { - "type" : "SingletonNode", - "dependencies" : [ ], - "id" : 1, - "estimatedCost" : 1, - "estimatedNrItems" : 1 - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 1 - ], - "id" : 2, - "estimatedCost" : 2, - "estimatedNrItems" : 1, - "expression" : { - "type" : "range", - "typeID" : 49, - "subNodes" : [ - { - "type" : "value", - "typeID" : 40, - "value" : 1, - "vType" : "int", - "vTypeID" : 2 - }, - { - "type" : "value", - "typeID" : 40, - "value" : 10, - "vType" : "int", - "vTypeID" : 2 - } - ] - }, - "outVariable" : { - "id" : 2, - "name" : "1" - }, - "canThrow" : false, - "expressionType" : "simple" - }, - { - "type" : "CalculationNode", - "dependencies" : [ - 2 - ], - "id" : 4, - "estimatedCost" : 3, - "estimatedNrItems" : 1, - "expression" : { - "type" : "value", - "typeID" : 40, - "value" : null, - "vType" : "null", - "vTypeID" : 0 - }, - "outVariable" : { - "id" : 4, - "name" : "3" - }, - "canThrow" : false, - "expressionType" : "json" - }, - { - "type" : "EnumerateListNode", - "dependencies" : [ - 4 - ], - "id" : 3, - "estimatedCost" : 13, - "estimatedNrItems" : 10, - "inVariable" : { - "id" : 2, - "name" : "1" - }, - "outVariable" : { - "id" : 0, - "name" : "i" - } - }, - { - "type" : "ReturnNode", - "dependencies" : [ - 3 - ], - "id" : 5, - "estimatedCost" : 23, - "estimatedNrItems" : 10, - "inVariable" : { - "id" : 4, - "name" : "3" - }, - "count" : true - } - ], - "rules" : [ - "move-calculations-up", - "move-calculations-up-2" - ], - "collections" : [ ], - "variables" : [ - { - "id" : 4, - "name" : "3" - }, - { - "id" : 2, - "name" : "1" - }, - { - "id" : 0, - "name" : "i" - } - ], - "estimatedCost" : 23, - "estimatedNrItems" : 10, - "initialize" : true, - "isModificationQuery" : false - }, - "cacheable" : false, - "warnings" : [ - { - "code" : 1562, - "message" : "division by zero" - } - ], - "stats" : { - "rulesExecuted" : 35, - "rulesSkipped" : 0, - "plansCreated" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestFetchAllUser.generated b/Documentation/Examples/RestFetchAllUser.generated deleted file mode 100644 index 7aaace86b08b..000000000000 --- a/Documentation/Examples/RestFetchAllUser.generated +++ /dev/null @@ -1,30 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/user - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 
-x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ - { - "user" : "tester", - "active" : false, - "extra" : { - } - }, - { - "user" : "admin", - "active" : true, - "extra" : { - } - }, - { - "user" : "root", - "active" : true, - "extra" : { - } - } - ] -} diff --git a/Documentation/Examples/RestFetchUser.generated b/Documentation/Examples/RestFetchUser.generated deleted file mode 100644 index 9e32cc77d383..000000000000 --- a/Documentation/Examples/RestFetchUser.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/user/admin@myapp - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "user" : "admin@myapp", - "active" : true, - "extra" : { - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestFetchUserCollectionPermission.generated b/Documentation/Examples/RestFetchUserCollectionPermission.generated deleted file mode 100644 index b07843ede0dc..000000000000 --- a/Documentation/Examples/RestFetchUserCollectionPermission.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/user/anotherAdmin@secapp/database/_system/_users - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : "none" -} diff --git a/Documentation/Examples/RestFetchUserDatabaseList.generated b/Documentation/Examples/RestFetchUserDatabaseList.generated deleted file mode 100644 index 4bb36896f1f1..000000000000 --- a/Documentation/Examples/RestFetchUserDatabaseList.generated +++ /dev/null @@ -1,13 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/user/anotherAdmin@secapp/database/ - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : { - "_system" : "rw" - } -} diff --git a/Documentation/Examples/RestFetchUserDatabaseListFull.generated b/Documentation/Examples/RestFetchUserDatabaseListFull.generated deleted file mode 100644 index 5bcfbc6f2aa3..000000000000 --- a/Documentation/Examples/RestFetchUserDatabaseListFull.generated +++ /dev/null @@ -1,35 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/user/anotherAdmin@secapp/database?full=true - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : { - "_system" : { - "permission" : "rw", - "collections" : { - "_queues" : "undefined", - "_frontend" : "undefined", - "_appbundles" : "undefined", - "_statistics" : "undefined", - "_users" : "undefined", - "_iresearch_analyzers" : "undefined", - "_jobs" : "undefined", - "demo" : "undefined", - "_aqlfunctions" : "undefined", - "_graphs" : "undefined", - "_apps" : "undefined", - "_statisticsRaw" : "undefined", - "_statistics15" : "undefined", - "animals" : "undefined", - "*" : "undefined" - } - }, - "*" : { - "permission" : "none" - } - } -} diff --git a/Documentation/Examples/RestFetchUserDatabasePermission.generated b/Documentation/Examples/RestFetchUserDatabasePermission.generated deleted file mode 100644 index dad8ed938d47..000000000000 --- a/Documentation/Examples/RestFetchUserDatabasePermission.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - 
http://localhost:8529/_api/user/anotherAdmin@secapp/database/_system - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : "rw" -} diff --git a/Documentation/Examples/RestGrantCollection.generated b/Documentation/Examples/RestGrantCollection.generated deleted file mode 100644 index dd095a5a28f4..000000000000 --- a/Documentation/Examples/RestGrantCollection.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp/database/_system/reports <<EOF -{ - "grant" : "rw" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "_system/reports" : "rw", - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestGrantDatabase.generated b/Documentation/Examples/RestGrantDatabase.generated deleted file mode 100644 index 0b8966821487..000000000000 --- a/Documentation/Examples/RestGrantDatabase.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp/database/_system <<EOF -{ - "grant" : "rw" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "_system" : "rw", - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestIResearchViewPatchProperties.generated b/Documentation/Examples/RestIResearchViewPatchProperties.generated deleted file mode 100644 index f41d1953ebaa..000000000000 --- a/Documentation/Examples/RestIResearchViewPatchProperties.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view/products/properties <<EOF -{ - "locale" : "en" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/104452", - "id" : "104452", - "name" : "products", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestIResearchViewPostView.generated b/Documentation/Examples/RestIResearchViewPostView.generated deleted file mode 100644 index d26456253d35..000000000000 --- a/Documentation/Examples/RestIResearchViewPostView.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view <<EOF -{ - "name" : "testViewBasics", - "type" : "arangosearch" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/104460", - "id" : "104460", - "name" : "testViewBasics", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestIResearchViewPutProperties.generated 
b/Documentation/Examples/RestIResearchViewPutProperties.generated deleted file mode 100644 index d391334db25f..000000000000 --- a/Documentation/Examples/RestIResearchViewPutProperties.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view/products/properties <<EOF -{ - "locale" : "en" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/104466", - "id" : "104466", - "name" : "products", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestImportCsvEdge.generated b/Documentation/Examples/RestImportCsvEdge.generated deleted file mode 100644 index 6680d6dc8973..000000000000 --- a/Documentation/Examples/RestImportCsvEdge.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=links <<EOF -[ "_from", "_to", "name" ] -[ "products/123","products/234", "some name" ] -[ "products/332", "products/abc", "other name" ] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 2, - "errors" : 0, - "empty" : 0, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportCsvEdgeInvalid.generated b/Documentation/Examples/RestImportCsvEdgeInvalid.generated deleted file mode 100644 index 819f8244d5ac..000000000000 --- a/Documentation/Examples/RestImportCsvEdgeInvalid.generated +++ /dev/null @@ -1,22 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&details=true <<EOF -[ "name" ] -[ "some name" ] -[ "other name" ] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 0, - "errors" : 2, - "empty" : 0, - "updated" : 0, - "ignored" : 0, - "details" : [ - "at position 1: missing '_from' or '_to' attribute, offending document: {\"name\":\"some name\"}", - "at position 2: missing '_from' or '_to' attribute, offending document: {\"name\":\"other name\"}" - ] -} diff --git a/Documentation/Examples/RestImportCsvExample.generated b/Documentation/Examples/RestImportCsvExample.generated deleted file mode 100644 index 12034f033ac1..000000000000 --- a/Documentation/Examples/RestImportCsvExample.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF -[ "_key", "value1", "value2" ] -[ "abc", 25, "test" ] - -[ "foo", "bar", "baz" ] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 2, - "errors" : 0, - "empty" : 1, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportCsvInvalidBody.generated b/Documentation/Examples/RestImportCsvInvalidBody.generated deleted file mode 100644 index f385e9303d42..000000000000 --- a/Documentation/Examples/RestImportCsvInvalidBody.generated +++ /dev/null 
@@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF -{ "_key": "foo", "value1": "bar" } -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "no JSON array found in second line", - "code" : 400, - "errorNum" : 400 -} diff --git a/Documentation/Examples/RestImportCsvInvalidCollection.generated b/Documentation/Examples/RestImportCsvInvalidCollection.generated deleted file mode 100644 index 70cd0a12dd5a..000000000000 --- a/Documentation/Examples/RestImportCsvInvalidCollection.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF -[ "_key", "value1", "value2" ] -[ "abc", 25, "test" ] -["foo", "bar", "baz" ] -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: products", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestImportCsvUniqueContinue.generated b/Documentation/Examples/RestImportCsvUniqueContinue.generated deleted file mode 100644 index bbf7ca408fff..000000000000 --- a/Documentation/Examples/RestImportCsvUniqueContinue.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&details=true <<EOF -[ "_key", "value1", "value2" ] -[ "abc", 25, "test" ] -["abc", "bar", "baz" ] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 1, - "errors" : 1, - "empty" : 0, - "updated" : 0, - "ignored" : 0, - "details" : [ - "at position 1: creating document failed with error 'unique constraint violated', offending document: {\"_key\":\"abc\",\"value1\":\"bar\",\"value2\":\"baz\"}" - ] -} diff --git a/Documentation/Examples/RestImportCsvUniqueFail.generated b/Documentation/Examples/RestImportCsvUniqueFail.generated deleted file mode 100644 index 7a3ea9c9251c..000000000000 --- a/Documentation/Examples/RestImportCsvUniqueFail.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&complete=true <<EOF -[ "_key", "value1", "value2" ] -[ "abc", 25, "test" ] -["abc", "bar", "baz" ] -EOF - -HTTP/1.1 Conflict -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "unique constraint violated", - "code" : 409, - "errorNum" : 1210 -} diff --git a/Documentation/Examples/RestImportJsonEdge.generated b/Documentation/Examples/RestImportJsonEdge.generated deleted file mode 100644 index fc334af6d583..000000000000 --- a/Documentation/Examples/RestImportJsonEdge.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=documents <<EOF -{ "_from": "products/123", "_to": "products/234" } -{"_from": "products/332", "_to": "products/abc", "name": "other name" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 2, - "errors" : 0, - "empty" : 
0, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportJsonEdgeInvalid.generated b/Documentation/Examples/RestImportJsonEdgeInvalid.generated deleted file mode 100644 index e8885d37a046..000000000000 --- a/Documentation/Examples/RestImportJsonEdgeInvalid.generated +++ /dev/null @@ -1,23 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=list&details=true <<EOF -[ - { - "name" : "some name" - } -] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 0, - "errors" : 1, - "empty" : 0, - "updated" : 0, - "ignored" : 0, - "details" : [ - "at position 1: missing '_from' or '_to' attribute, offending document: {\"name\":\"some name\"}" - ] -} diff --git a/Documentation/Examples/RestImportJsonInvalidBody.generated b/Documentation/Examples/RestImportJsonInvalidBody.generated deleted file mode 100644 index 06eb0b90989e..000000000000 --- a/Documentation/Examples/RestImportJsonInvalidBody.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF -{ } -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "expecting a JSON array in the request", - "code" : 400, - "errorNum" : 400 -} diff --git a/Documentation/Examples/RestImportJsonInvalidCollection.generated b/Documentation/Examples/RestImportJsonInvalidCollection.generated deleted file mode 100644 index 6c1a5a916df6..000000000000 --- a/Documentation/Examples/RestImportJsonInvalidCollection.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF -{ "name": "test" } -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: products", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestImportJsonLines.generated b/Documentation/Examples/RestImportJsonLines.generated deleted file mode 100644 index 0a32f796f5c4..000000000000 --- a/Documentation/Examples/RestImportJsonLines.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF -{ "_key": "abc", "value1": 25, "value2": "test","allowed": true } -{ "_key": "foo", "name": "baz" } - -{ "name": { "detailed": "detailed name", "short": "short name" } } - -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 3, - "errors" : 0, - "empty" : 1, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportJsonList.generated b/Documentation/Examples/RestImportJsonList.generated deleted file mode 100644 index da5790e151e8..000000000000 --- a/Documentation/Examples/RestImportJsonList.generated +++ /dev/null @@ -1,33 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF -[ - { - "_key" : "abc", - "value1" : 25, - "value2" : "test", - "allowed" 
: true - }, - { - "_key" : "foo", - "name" : "baz" - }, - { - "name" : { - "detailed" : "detailed name", - "short" : "short name" - } - } -] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 3, - "errors" : 0, - "empty" : 0, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportJsonType.generated b/Documentation/Examples/RestImportJsonType.generated deleted file mode 100644 index dc17e846fbaa..000000000000 --- a/Documentation/Examples/RestImportJsonType.generated +++ /dev/null @@ -1,33 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=auto <<EOF -[ - { - "_key" : "abc", - "value1" : 25, - "value2" : "test", - "allowed" : true - }, - { - "_key" : "foo", - "name" : "baz" - }, - { - "name" : { - "detailed" : "detailed name", - "short" : "short name" - } - } -] -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 3, - "errors" : 0, - "empty" : 0, - "updated" : 0, - "ignored" : 0 -} diff --git a/Documentation/Examples/RestImportJsonUniqueContinue.generated b/Documentation/Examples/RestImportJsonUniqueContinue.generated deleted file mode 100644 index f473ef4687c7..000000000000 --- a/Documentation/Examples/RestImportJsonUniqueContinue.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&details=true <<EOF -{ "_key": "abc", "value1": 25, "value2": "test" } -{ "_key": "abc", "value1": "bar", "value2": "baz" } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "created" : 1, - "errors" : 1, - "empty" : 0, - "updated" : 0, - "ignored" : 0, - "details" : [ - "at position 1: creating document failed with error 'unique constraint violated', offending document: {\"_key\":\"abc\",\"value1\":\"bar\",\"value2\":\"baz\"}" - ] -} diff --git a/Documentation/Examples/RestImportJsonUniqueFail.generated b/Documentation/Examples/RestImportJsonUniqueFail.generated deleted file mode 100644 index e6913e01f33b..000000000000 --- a/Documentation/Examples/RestImportJsonUniqueFail.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&complete=true <<EOF -{ "_key": "abc", "value1": 25, "value2": "test" } -{ "_key": "abc", "value1": "bar", "value2": "baz" } -EOF - -HTTP/1.1 Conflict -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "unique constraint violated", - "code" : 409, - "errorNum" : 1210 -} diff --git a/Documentation/Examples/RestIndexAllIndexes.generated b/Documentation/Examples/RestIndexAllIndexes.generated deleted file mode 100644 index e4dc4607a37d..000000000000 --- a/Documentation/Examples/RestIndexAllIndexes.generated +++ /dev/null @@ -1,76 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/index?collection=products - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "indexes" : [ - { - "fields" : [ - "_key" - ], - "id" : "products/0", - 
"selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "name" - ], - "id" : "products/104728", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false - }, - { - "deduplicate" : true, - "fields" : [ - "price" - ], - "id" : "products/104731", - "sparse" : true, - "type" : "skiplist", - "unique" : false - } - ], - "identifiers" : { - "products/0" : { - "fields" : [ - "_key" - ], - "id" : "products/0", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - }, - "products/104728" : { - "deduplicate" : true, - "fields" : [ - "name" - ], - "id" : "products/104728", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false - }, - "products/104731" : { - "deduplicate" : true, - "fields" : [ - "price" - ], - "id" : "products/104731", - "sparse" : true, - "type" : "skiplist", - "unique" : false - } - } -} diff --git a/Documentation/Examples/RestIndexCreateGeoLatitudeLongitude.generated b/Documentation/Examples/RestIndexCreateGeoLatitudeLongitude.generated deleted file mode 100644 index 2e699ebbeaa8..000000000000 --- a/Documentation/Examples/RestIndexCreateGeoLatitudeLongitude.generated +++ /dev/null @@ -1,31 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "geo", - "fields" : [ - "e", - "f" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "bestIndexedLevel" : 17, - "fields" : [ - "e", - "f" - ], - "geoJson" : false, - "id" : "products/104746", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateGeoLocation.generated b/Documentation/Examples/RestIndexCreateGeoLocation.generated deleted file mode 100644 index 4417f29351a7..000000000000 --- a/Documentation/Examples/RestIndexCreateGeoLocation.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "geo", - "fields" : [ - "b" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "bestIndexedLevel" : 17, - "fields" : [ - "b" - ], - "geoJson" : false, - "id" : "products/104760", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateNewFulltext.generated b/Documentation/Examples/RestIndexCreateNewFulltext.generated deleted file mode 100644 index 643d01421e8f..000000000000 --- a/Documentation/Examples/RestIndexCreateNewFulltext.generated +++ /dev/null @@ -1,26 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "fulltext", - "fields" : [ - "text" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "fields" : [ - "text" - ], - "id" : "products/104774", - "isNewlyCreated" : true, - "minLength" : 2, - "sparse" : true, - "type" : "fulltext", - "unique" : false, - "error" : false, - "code" : 
201 -} diff --git a/Documentation/Examples/RestIndexCreateNewHashIndex.generated b/Documentation/Examples/RestIndexCreateNewHashIndex.generated deleted file mode 100644 index ffc9366ff904..000000000000 --- a/Documentation/Examples/RestIndexCreateNewHashIndex.generated +++ /dev/null @@ -1,30 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "hash", - "unique" : false, - "fields" : [ - "a", - "b" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "products/104788", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateNewPersistent.generated b/Documentation/Examples/RestIndexCreateNewPersistent.generated deleted file mode 100644 index 4fb2d75d4179..000000000000 --- a/Documentation/Examples/RestIndexCreateNewPersistent.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "persistent", - "unique" : false, - "fields" : [ - "a", - "b" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "products/104802", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "persistent", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateNewSkiplist.generated b/Documentation/Examples/RestIndexCreateNewSkiplist.generated deleted file mode 100644 index 97d877053124..000000000000 --- a/Documentation/Examples/RestIndexCreateNewSkiplist.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "skiplist", - "unique" : false, - "fields" : [ - "a", - "b" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "products/104816", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateNewTtlIndex.generated b/Documentation/Examples/RestIndexCreateNewTtlIndex.generated deleted file mode 100644 index 71631901f55b..000000000000 --- a/Documentation/Examples/RestIndexCreateNewTtlIndex.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=sessions <<EOF -{ - "type" : "ttl", - "expireAfter" : 3600, - "fields" : [ - "createdAt" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "expireAfter" : 3600, - "fields" : [ - "createdAt" - ], - "id" : "sessions/104830", - "isNewlyCreated" : true, - "sparse" : true, - "type" : "ttl", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateNewUniqueConstraint.generated b/Documentation/Examples/RestIndexCreateNewUniqueConstraint.generated deleted file mode 100644 
index 80f2ba2b5b36..000000000000 --- a/Documentation/Examples/RestIndexCreateNewUniqueConstraint.generated +++ /dev/null @@ -1,30 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "hash", - "unique" : true, - "fields" : [ - "a", - "b" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "products/104844", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : true, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateSparseHashIndex.generated b/Documentation/Examples/RestIndexCreateSparseHashIndex.generated deleted file mode 100644 index 1aaa2e53ea45..000000000000 --- a/Documentation/Examples/RestIndexCreateSparseHashIndex.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "hash", - "unique" : false, - "sparse" : true, - "fields" : [ - "a" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a" - ], - "id" : "products/104858", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : true, - "type" : "hash", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateSparsePersistent.generated b/Documentation/Examples/RestIndexCreateSparsePersistent.generated deleted file mode 100644 index 13d609456eba..000000000000 --- a/Documentation/Examples/RestIndexCreateSparsePersistent.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "persistent", - "unique" : false, - "sparse" : true, - "fields" : [ - "a" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a" - ], - "id" : "products/104872", - "isNewlyCreated" : true, - "sparse" : true, - "type" : "persistent", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexCreateSparseSkiplist.generated b/Documentation/Examples/RestIndexCreateSparseSkiplist.generated deleted file mode 100644 index 0083af92b5cd..000000000000 --- a/Documentation/Examples/RestIndexCreateSparseSkiplist.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF -{ - "type" : "skiplist", - "unique" : false, - "sparse" : true, - "fields" : [ - "a" - ] -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deduplicate" : true, - "fields" : [ - "a" - ], - "id" : "products/104895", - "isNewlyCreated" : true, - "sparse" : true, - "type" : "skiplist", - "unique" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestIndexDeleteUniqueSkiplist.generated b/Documentation/Examples/RestIndexDeleteUniqueSkiplist.generated deleted file mode 100644 index 9ae126fc4dc8..000000000000 --- a/Documentation/Examples/RestIndexDeleteUniqueSkiplist.generated +++ 
/dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/index/products/104909 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "id" : "products/104909", - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestIndexPrimaryIndex.generated b/Documentation/Examples/RestIndexPrimaryIndex.generated deleted file mode 100644 index 192a5bd3bf66..000000000000 --- a/Documentation/Examples/RestIndexPrimaryIndex.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/index/products/0 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "fields" : [ - "_key" - ], - "id" : "products/0", - "sparse" : false, - "type" : "primary", - "unique" : true, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestQueryInvalid.generated b/Documentation/Examples/RestQueryInvalid.generated deleted file mode 100644 index 4fb5dd8e8c7d..000000000000 --- a/Documentation/Examples/RestQueryInvalid.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/query <<EOF -{ "query" : "FOR i IN 1..100 FILTER i = 1 LIMIT 2 RETURN i * 3" } -EOF - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "syntax error, unexpected assignment near '= 1 LIMIT 2 RETURN i * 3' at position 1:26", - "code" : 400, - "errorNum" : 1501 -} diff --git a/Documentation/Examples/RestQueryNonExisting.generated b/Documentation/Examples/RestQueryNonExisting.generated deleted file mode 100644 index 4e7d7f659d0c..000000000000 --- a/Documentation/Examples/RestQueryNonExisting.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/query <<EOF -{ "query" : "FOR doc IN collectionThatDoesNotExist RETURN doc._key" } -EOF - -HTTP/1.1 undefined -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: name: collectionThatDoesNotExist", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestQueryValid.generated b/Documentation/Examples/RestQueryValid.generated deleted file mode 100644 index e229e88edd1f..000000000000 --- a/Documentation/Examples/RestQueryValid.generated +++ /dev/null @@ -1,99 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/query <<EOF -{ "query" : "FOR i IN 1..100 FILTER i > 10 LIMIT 2 RETURN i * 3" } -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "parsed" : true, - "collections" : [ ], - "bindVars" : [ ], - "ast" : [ - { - "type" : "root", - "subNodes" : [ - { - "type" : "for", - "subNodes" : [ - { - "type" : "variable", - "name" : "i", - "id" : 0 - }, - { - "type" : "range", - "subNodes" : [ - { - "type" : "value", - "value" : 1 - }, - { - "type" : "value", - "value" : 100 - } - ] - }, - { - "type" : "no-op" - } - ] - }, - { - "type" : "filter", - "subNodes" : [ - { - "type" : "compare >", - "subNodes" : [ - { - "type" : "reference", - "name" : "i", - "id" : 0 - }, - { - "type" : "value", - "value" : 10 - } - ] - } - ] - 
}, - { - "type" : "limit", - "subNodes" : [ - { - "type" : "value", - "value" : 0 - }, - { - "type" : "value", - "value" : 2 - } - ] - }, - { - "type" : "return", - "subNodes" : [ - { - "type" : "times", - "subNodes" : [ - { - "type" : "reference", - "name" : "i", - "id" : 0 - }, - { - "type" : "value", - "value" : 3 - } - ] - } - ] - } - ] - } - ] -} diff --git a/Documentation/Examples/RestReplaceUser.generated b/Documentation/Examples/RestReplaceUser.generated deleted file mode 100644 index 7479bcd175a8..000000000000 --- a/Documentation/Examples/RestReplaceUser.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp <<EOF -{ - "passwd" : "secure" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "user" : "admin@myapp", - "active" : true, - "extra" : { - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestReplicationApplierGetConfig.generated b/Documentation/Examples/RestReplicationApplierGetConfig.generated deleted file mode 100644 index bded89ea4897..000000000000 --- a/Documentation/Examples/RestReplicationApplierGetConfig.generated +++ /dev/null @@ -1,32 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/applier-config - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "requestTimeout" : 600, - "connectTimeout" : 10, - "ignoreErrors" : 0, - "maxConnectRetries" : 100, - "lockTimeoutRetries" : 0, - "sslProtocol" : 0, - "chunkSize" : 0, - "skipCreateDrop" : false, - "autoStart" : false, - "adaptivePolling" : true, - "autoResync" : false, - "autoResyncRetries" : 2, - "maxPacketSize" : 536870912, - "includeSystem" : true, - "requireFromPresent" : true, - "verbose" : false, - "incremental" : false, - "restrictType" : "", - "restrictCollections" : [ ], - "connectionRetryWaitTime" : 15, - "initialSyncMaxWaitTime" : 300, - "idleMinWaitTime" : 1, - "idleMaxWaitTime" : 2.5, - "force32mode" : false -} diff --git a/Documentation/Examples/RestReplicationApplierSetConfig.generated b/Documentation/Examples/RestReplicationApplierSetConfig.generated deleted file mode 100644 index 2cd379e8fdf9..000000000000 --- a/Documentation/Examples/RestReplicationApplierSetConfig.generated +++ /dev/null @@ -1,44 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/replication/applier-config <<EOF -{ - "endpoint" : "tcp://127.0.0.1:8529", - "username" : "replicationApplier", - "password" : "applier1234@foxx", - "chunkSize" : 4194304, - "autoStart" : false, - "adaptivePolling" : true -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "endpoint" : "tcp://127.0.0.1:8529", - "database" : "_system", - "username" : "replicationApplier", - "requestTimeout" : 600, - "connectTimeout" : 10, - "ignoreErrors" : 0, - "maxConnectRetries" : 100, - "lockTimeoutRetries" : 0, - "sslProtocol" : 0, - "chunkSize" : 4194304, - "skipCreateDrop" : false, - "autoStart" : false, - "adaptivePolling" : true, - "autoResync" : false, - "autoResyncRetries" : 2, - "maxPacketSize" : 536870912, - "includeSystem" : true, - "requireFromPresent" : true, - "verbose" : false, - "incremental" : false, - "restrictType" : "", - "restrictCollections" : [ ], - "connectionRetryWaitTime" : 15, - "initialSyncMaxWaitTime" : 300, - 
"idleMinWaitTime" : 1, - "idleMaxWaitTime" : 2.5, - "force32mode" : false -} diff --git a/Documentation/Examples/RestReplicationApplierStart.generated b/Documentation/Examples/RestReplicationApplierStart.generated deleted file mode 100644 index 10f1b5d42d9c..000000000000 --- a/Documentation/Examples/RestReplicationApplierStart.generated +++ /dev/null @@ -1,37 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/applier-start - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "state" : { - "running" : true, - "phase" : "running", - "lastAppliedContinuousTick" : null, - "lastProcessedContinuousTick" : null, - "lastAvailableContinuousTick" : null, - "safeResumeTick" : null, - "ticksBehind" : 0, - "progress" : { - "time" : "2019-02-20T10:32:42Z", - "message" : "applier initially created for database '_system'", - "failedConnects" : 0 - }, - "totalRequests" : 0, - "totalFailedConnects" : 0, - "totalEvents" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2019-02-20T10:33:03Z" - }, - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - }, - "endpoint" : "tcp://127.0.0.1:8529", - "database" : "_system" -} diff --git a/Documentation/Examples/RestReplicationApplierStateNotRunning.generated b/Documentation/Examples/RestReplicationApplierStateNotRunning.generated deleted file mode 100644 index 0127dcb8cf54..000000000000 --- a/Documentation/Examples/RestReplicationApplierStateNotRunning.generated +++ /dev/null @@ -1,36 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/applier-state - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "state" : { - "running" : false, - "phase" : "inactive", - "lastAppliedContinuousTick" : null, - "lastProcessedContinuousTick" : null, - "lastAvailableContinuousTick" : null, - "safeResumeTick" : null, - "progress" : { - "time" : "2019-02-20T10:33:03Z", - "message" : "applier shut down", - "failedConnects" : 0 - }, - "totalRequests" : 1, - "totalFailedConnects" : 0, - "totalEvents" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2019-02-20T10:33:03Z" - }, - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - }, - "endpoint" : "tcp://127.0.0.1:8529", - "database" : "_system" -} diff --git a/Documentation/Examples/RestReplicationApplierStateRunning.generated b/Documentation/Examples/RestReplicationApplierStateRunning.generated deleted file mode 100644 index 4d878440ddc9..000000000000 --- a/Documentation/Examples/RestReplicationApplierStateRunning.generated +++ /dev/null @@ -1,37 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/applier-state - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "state" : { - "running" : true, - "phase" : "running", - "lastAppliedContinuousTick" : null, - "lastProcessedContinuousTick" : null, - "lastAvailableContinuousTick" : null, - "safeResumeTick" : null, - "ticksBehind" : 0, - "progress" : { - "time" : "2019-02-20T10:33:03Z", - "message" : "fetching master state information", - "failedConnects" : 0 - }, - "totalRequests" : 1, - "totalFailedConnects" : 0, - "totalEvents" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "lastError" : { - 
"errorNum" : 0 - }, - "time" : "2019-02-20T10:33:03Z" - }, - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - }, - "endpoint" : "tcp://127.0.0.1:8529", - "database" : "_system" -} diff --git a/Documentation/Examples/RestReplicationApplierStop.generated b/Documentation/Examples/RestReplicationApplierStop.generated deleted file mode 100644 index 456753630f56..000000000000 --- a/Documentation/Examples/RestReplicationApplierStop.generated +++ /dev/null @@ -1,36 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/applier-stop - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "state" : { - "running" : false, - "phase" : "inactive", - "lastAppliedContinuousTick" : null, - "lastProcessedContinuousTick" : null, - "lastAvailableContinuousTick" : null, - "safeResumeTick" : null, - "progress" : { - "time" : "2019-02-20T10:33:03Z", - "message" : "applier shut down", - "failedConnects" : 0 - }, - "totalRequests" : 3, - "totalFailedConnects" : 0, - "totalEvents" : 0, - "totalResyncs" : 0, - "totalOperationsExcluded" : 0, - "lastError" : { - "errorNum" : 0 - }, - "time" : "2019-02-20T10:33:03Z" - }, - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - }, - "endpoint" : "tcp://127.0.0.1:8529", - "database" : "_system" -} diff --git a/Documentation/Examples/RestReplicationDumpEmpty_mmfiles.generated b/Documentation/Examples/RestReplicationDumpEmpty_mmfiles.generated deleted file mode 100644 index 44a5bd3e908a..000000000000 --- a/Documentation/Examples/RestReplicationDumpEmpty_mmfiles.generated +++ /dev/null @@ -1,8 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/dump?collection=testCollection - -HTTP/1.1 No Content -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-checkmore: false -x-arango-replication-lastincluded: 0 -x-content-type-options: nosniff - diff --git a/Documentation/Examples/RestReplicationDump_mmfiles.generated b/Documentation/Examples/RestReplicationDump_mmfiles.generated deleted file mode 100644 index 19bf8670c2c2..000000000000 --- a/Documentation/Examples/RestReplicationDump_mmfiles.generated +++ /dev/null @@ -1,36 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/dump?collection=testCollection - -HTTP/1.1 OK -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-checkmore: false -x-arango-replication-lastincluded: 104964 -x-content-type-options: nosniff - -{ - "tick" : "104958", - "type" : 2300, - "data" : { - "_key" : "123456", - "_id" : "testCollection/123456", - "_rev" : "_YOn1QmK--_", - "b" : 1, - "c" : false, - "d" : "additional value" - } -}↩ -{ - "tick" : "104962", - "type" : 2302, - "data" : { - "_key" : "foobar", - "_rev" : "_YOn1QmK--D" - } -}↩ -{ - "tick" : "104964", - "type" : 2302, - "data" : { - "_key" : "abcdef", - "_rev" : "_YOn1QmO--_" - } -}↩ diff --git a/Documentation/Examples/RestReplicationInventoryIndexes_mmfiles.generated b/Documentation/Examples/RestReplicationInventoryIndexes_mmfiles.generated deleted file mode 100644 index df70ddee6e2b..000000000000 --- a/Documentation/Examples/RestReplicationInventoryIndexes_mmfiles.generated +++ /dev/null @@ -1,453 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/inventory - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 
-x-content-type-options: nosniff - -{ - "collections" : [ - { - "indexes" : [ - { - "id" : "105000", - "type" : "hash", - "fields" : [ - "name" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - }, - { - "id" : "105003", - "type" : "skiplist", - "fields" : [ - "a", - "b" - ], - "unique" : true, - "sparse" : false, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "104993", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/104993", - "id" : "104993", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "IndexedCollection1", - "numberOfShards" : 1, - "planId" : "104993", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ - { - "id" : "105012", - "type" : "fulltext", - "fields" : [ - "text" - ], - "unique" : false, - "sparse" : true, - "minLength" : 10 - }, - { - "id" : "105015", - "type" : "skiplist", - "fields" : [ - "a" - ], - "unique" : false, - "sparse" : false, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "105005", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/105005", - "id" : "105005", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "IndexedCollection2", - "numberOfShards" : 1, - "planId" : "105005", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "32", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_appbundles", - "id" : "32", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_appbundles", - "numberOfShards" : 1, - "planId" : "32", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ - { - "id" : "30", - "type" : "hash", - "fields" : [ - "mount" - ], - "unique" : true, - "sparse" : true, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "27", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_apps", - "id" : "27", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 41 - }, - "name" : "_apps", - "numberOfShards" : 1, - "planId" : "27", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "13", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_aqlfunctions", - "id" : "13", - "indexBuckets" : 8, 
- "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_aqlfunctions", - "numberOfShards" : 1, - "planId" : "13", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "6", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_graphs", - "id" : "6", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_graphs", - "numberOfShards" : 1, - "planId" : "6", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "2", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_iresearch_analyzers", - "id" : "2", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_iresearch_analyzers", - "numberOfShards" : 1, - "planId" : "2", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ - { - "id" : "11", - "type" : "hash", - "fields" : [ - "user" - ], - "unique" : true, - "sparse" : true, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "8", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_users", - "id" : "8", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 104933 - }, - "name" : "_users", - "numberOfShards" : 1, - "planId" : "8", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "96", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/96", - "id" : "96", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "animals", - "numberOfShards" : 1, - "planId" : "96", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "87", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/87", - "id" : "87", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "demo", - "numberOfShards" : 1, - "planId" : "87", - 
"replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - } - ], - "views" : [ - { - "globallyUniqueId" : "h8B2B671BCFD0/102", - "id" : "102", - "name" : "demoView", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } - } - ], - "state" : { - "running" : true, - "lastLogTick" : "104983", - "lastUncommittedLogTick" : "105016", - "totalEvents" : 35268, - "time" : "2019-02-20T10:33:06Z" - }, - "tick" : "105016" -} diff --git a/Documentation/Examples/RestReplicationInventory_mmfiles.generated b/Documentation/Examples/RestReplicationInventory_mmfiles.generated deleted file mode 100644 index 2a8a8034ff37..000000000000 --- a/Documentation/Examples/RestReplicationInventory_mmfiles.generated +++ /dev/null @@ -1,340 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/inventory - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "collections" : [ - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "32", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_appbundles", - "id" : "32", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_appbundles", - "numberOfShards" : 1, - "planId" : "32", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ - { - "id" : "30", - "type" : "hash", - "fields" : [ - "mount" - ], - "unique" : true, - "sparse" : true, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "27", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_apps", - "id" : "27", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 41 - }, - "name" : "_apps", - "numberOfShards" : 1, - "planId" : "27", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "13", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_aqlfunctions", - "id" : "13", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_aqlfunctions", - "numberOfShards" : 1, - "planId" : "13", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "6", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_graphs", - "id" : "6", - "indexBuckets" 
: 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_graphs", - "numberOfShards" : 1, - "planId" : "6", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "2", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_iresearch_analyzers", - "id" : "2", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "_iresearch_analyzers", - "numberOfShards" : 1, - "planId" : "2", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ - { - "id" : "11", - "type" : "hash", - "fields" : [ - "user" - ], - "unique" : true, - "sparse" : true, - "deduplicate" : true - } - ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "8", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "_users", - "id" : "8", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : true, - "isVolatile" : false, - "journalSize" : 1048576, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 104933 - }, - "name" : "_users", - "numberOfShards" : 1, - "planId" : "8", - "replicationFactor" : 2, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "96", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/96", - "id" : "96", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "animals", - "numberOfShards" : 1, - "planId" : "96", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - }, - { - "indexes" : [ ], - "parameters" : { - "allowUserKeys" : true, - "cid" : "87", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/87", - "id" : "87", - "indexBuckets" : 8, - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "demo", - "numberOfShards" : 1, - "planId" : "87", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } - } - ], - "views" : [ - { - "globallyUniqueId" : "h8B2B671BCFD0/102", - "id" : "102", - "name" : "demoView", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } - } - ], - "state" : { - 
"running" : true, - "lastLogTick" : "104983", - "lastUncommittedLogTick" : "104989", - "totalEvents" : 35260, - "time" : "2019-02-20T10:33:06Z" - }, - "tick" : "104990" -} diff --git a/Documentation/Examples/RestReplicationLoggerFirstTick.generated b/Documentation/Examples/RestReplicationLoggerFirstTick.generated deleted file mode 100644 index d7ee7bbbc1ce..000000000000 --- a/Documentation/Examples/RestReplicationLoggerFirstTick.generated +++ /dev/null @@ -1,9 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/logger-first-tick - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "firstTick" : "5" -} diff --git a/Documentation/Examples/RestReplicationLoggerFollowBufferLimit.generated b/Documentation/Examples/RestReplicationLoggerFollowBufferLimit.generated deleted file mode 100644 index 5c8a3e4377ca..000000000000 --- a/Documentation/Examples/RestReplicationLoggerFollowBufferLimit.generated +++ /dev/null @@ -1,62 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/logger-follow?from=105025&chunkSize=400 - -HTTP/1.1 OK -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-active: true -x-arango-replication-checkmore: true -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 105029 -x-arango-replication-lastscanned: 105029 -x-arango-replication-lasttick: 105046 -x-content-type-options: nosniff - -{ - "tick" : "105029", - "type" : 2000, - "database" : "1", - "cid" : "105028", - "cname" : "products", - "data" : { - "allowUserKeys" : true, - "cid" : "105028", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/105028", - "id" : "105028", - "indexBuckets" : 8, - "indexes" : [ - { - "id" : "0", - "type" : "primary", - "fields" : [ - "_key" - ], - "unique" : true, - "sparse" : false - } - ], - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "products", - "numberOfShards" : 1, - "planId" : "105028", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } -} diff --git a/Documentation/Examples/RestReplicationLoggerFollowEmpty.generated b/Documentation/Examples/RestReplicationLoggerFollowEmpty.generated deleted file mode 100644 index 9e5f1e275663..000000000000 --- a/Documentation/Examples/RestReplicationLoggerFollowEmpty.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/logger-follow?from=105046 - -HTTP/1.1 No Content -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-active: true -x-arango-replication-checkmore: false -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 0 -x-arango-replication-lastscanned: 105046 -x-arango-replication-lasttick: 105046 -x-content-type-options: nosniff - diff --git a/Documentation/Examples/RestReplicationLoggerFollowSome.generated b/Documentation/Examples/RestReplicationLoggerFollowSome.generated deleted file mode 100644 index 70db612e5b6b..000000000000 --- a/Documentation/Examples/RestReplicationLoggerFollowSome.generated +++ /dev/null @@ -1,278 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - 
http://localhost:8529/_api/replication/logger-follow?from=105046 - -HTTP/1.1 OK -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-active: true -x-arango-replication-checkmore: false -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 105067 -x-arango-replication-lastscanned: 105067 -x-arango-replication-lasttick: 105067 -x-content-type-options: nosniff - -{ - "tick" : "105050", - "type" : 2000, - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "allowUserKeys" : true, - "cid" : "105049", - "count" : 0, - "deleted" : false, - "doCompact" : true, - "globallyUniqueId" : "h8B2B671BCFD0/105049", - "id" : "105049", - "indexBuckets" : 8, - "indexes" : [ - { - "id" : "0", - "type" : "primary", - "fields" : [ - "_key" - ], - "unique" : true, - "sparse" : false - } - ], - "isSmart" : false, - "isSystem" : false, - "isVolatile" : false, - "journalSize" : 33554432, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "name" : "products", - "numberOfShards" : 1, - "planId" : "105049", - "replicationFactor" : 1, - "shardKeys" : [ - "_key" - ], - "shards" : { - }, - "status" : 3, - "type" : 2, - "version" : 7, - "waitForSync" : false - } -}↩ -{ - "tick" : "105053", - "type" : 2300, - "tid" : "0", - "database" : "1", - "cid" : "8", - "cname" : "_users", - "data" : { - "_key" : "58", - "_id" : "_users/58", - "_rev" : "_YOn1VSi--_", - "user" : "root", - "source" : "LOCAL", - "authData" : { - "active" : true, - "simple" : { - "hash" : "ba63424cac2432f605d770a3a2ca1c066f164ee2e022b3f6fa1c41bfa2391f6c", - "salt" : "93971d8d", - "method" : "sha256" - } - }, - "databases" : { - "_system" : { - "permissions" : { - "read" : true, - "write" : true - }, - "collections" : { - "demo" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "animals" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "products" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "*" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "products1" : { - "permissions" : { - "read" : true, - "write" : true - } - } - } - }, - "*" : { - "permissions" : { - "read" : true, - "write" : true - }, - "collections" : { - "*" : { - "permissions" : { - "read" : true, - "write" : true - } - } - } - } - } - } -}↩ -{ - "tick" : "105057", - "type" : 2300, - "tid" : "0", - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "_key" : "p1", - "_id" : "_unknown/p1", - "_rev" : "_YOn1VSm--_", - "name" : "flux compensator" - } -}↩ -{ - "tick" : "105059", - "type" : 2300, - "tid" : "0", - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "_key" : "p2", - "_id" : "_unknown/p2", - "_rev" : "_YOn1VSm--B", - "name" : "hybrid hovercraft", - "hp" : 5100 - } -}↩ -{ - "tick" : "105061", - "type" : 2302, - "tid" : "0", - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "_key" : "p1", - "_rev" : "_YOn1VSm--D" - } -}↩ -{ - "tick" : "105063", - "type" : 2300, - "tid" : "0", - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "_key" : "p2", - "_id" : "_unknown/p2", - "_rev" : "_YOn1VSq--_", - "name" : "broken hovercraft", - "hp" : 5100 - } -}↩ -{ - "tick" : "105064", - "type" : 2001, - "database" : "1", - "cid" : "105049", - "cname" : "products", - "data" : { - "id" : "105049", - "name" : "products", - "cuid" : "h8B2B671BCFD0/105049" - } -}↩ -{ - "tick" : "105067", - "type" : 2300, - 
"tid" : "0", - "database" : "1", - "cid" : "8", - "cname" : "_users", - "data" : { - "_key" : "58", - "_id" : "_users/58", - "_rev" : "_YOn1VTC--_", - "user" : "root", - "source" : "LOCAL", - "authData" : { - "active" : true, - "simple" : { - "hash" : "ba63424cac2432f605d770a3a2ca1c066f164ee2e022b3f6fa1c41bfa2391f6c", - "salt" : "93971d8d", - "method" : "sha256" - } - }, - "databases" : { - "*" : { - "permissions" : { - "read" : true, - "write" : true - }, - "collections" : { - "*" : { - "permissions" : { - "read" : true, - "write" : true - } - } - } - }, - "_system" : { - "permissions" : { - "read" : true, - "write" : true - }, - "collections" : { - "products1" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "*" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "demo" : { - "permissions" : { - "read" : true, - "write" : true - } - }, - "animals" : { - "permissions" : { - "read" : true, - "write" : true - } - } - } - } - } - } -}↩ diff --git a/Documentation/Examples/RestReplicationLoggerStateActive.generated b/Documentation/Examples/RestReplicationLoggerStateActive.generated deleted file mode 100644 index 2161a48100f6..000000000000 --- a/Documentation/Examples/RestReplicationLoggerStateActive.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/logger-state - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "state" : { - "running" : true, - "lastLogTick" : "105067", - "lastUncommittedLogTick" : "105067", - "totalEvents" : 35288, - "time" : "2019-02-20T10:33:10Z" - }, - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512", - "engine" : "mmfiles" - }, - "clients" : [ ] -} diff --git a/Documentation/Examples/RestReplicationLoggerTickRanges.generated b/Documentation/Examples/RestReplicationLoggerTickRanges.generated deleted file mode 100644 index 1992f352d7ff..000000000000 --- a/Documentation/Examples/RestReplicationLoggerTickRanges.generated +++ /dev/null @@ -1,38 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/logger-tick-ranges - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - { - "datafile" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/logfile-3.db", - "status" : "collected", - "tickMin" : "5", - "tickMax" : "103215" - }, - { - "datafile" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/logfile-85.db", - "status" : "collected", - "tickMin" : "103229", - "tickMax" : "103352" - }, - { - "datafile" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/logfile-232.db", - "status" : "collected", - "tickMin" : "103360", - "tickMax" : "104964" - }, - { - "datafile" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/logfile-103218.db", - "status" : "collected", - "tickMin" : "104968", - "tickMax" : "104980" - }, - { - "datafile" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/logfile-103355.db", - "status" : "open", - "tickMin" : "104986", - "tickMax" : "105067" - } -] diff --git a/Documentation/Examples/RestReplicationServerId.generated b/Documentation/Examples/RestReplicationServerId.generated deleted file mode 100644 index 2c0702e7e18a..000000000000 --- a/Documentation/Examples/RestReplicationServerId.generated +++ /dev/null @@ -1,9 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/replication/server-id - -HTTP/1.1 
OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "serverId" : "153018529730512" -} diff --git a/Documentation/Examples/RestRevokeCollection.generated b/Documentation/Examples/RestRevokeCollection.generated deleted file mode 100644 index 7f8664e2c771..000000000000 --- a/Documentation/Examples/RestRevokeCollection.generated +++ /dev/null @@ -1,10 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/user/admin@myapp/database/_system/reports - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202 -} diff --git a/Documentation/Examples/RestRevokeDatabase.generated b/Documentation/Examples/RestRevokeDatabase.generated deleted file mode 100644 index 464f15360186..000000000000 --- a/Documentation/Examples/RestRevokeDatabase.generated +++ /dev/null @@ -1,10 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/user/admin@myapp/database/_system - -HTTP/1.1 Accepted -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 202 -} diff --git a/Documentation/Examples/RestSimpleAllBatch.generated b/Documentation/Examples/RestSimpleAllBatch.generated deleted file mode 100644 index 6f1a4010fff2..000000000000 --- a/Documentation/Examples/RestSimpleAllBatch.generated +++ /dev/null @@ -1,49 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF -{ "collection": "products", "batchSize" : 3 } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105120", - "_id" : "products/105120", - "_rev" : "_YOn1XS6--H", - "Hello5" : "World5" - }, - { - "_key" : "105117", - "_id" : "products/105117", - "_rev" : "_YOn1XS6--F", - "Hello4" : "World4" - }, - { - "_key" : "105114", - "_id" : "products/105114", - "_rev" : "_YOn1XS6--D", - "Hello3" : "World3" - } - ], - "hasMore" : true, - "id" : "105123", - "count" : 5, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 5, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00013184547424316406, - "peakMemoryUsage" : 17984 - }, - "warnings" : [ ] - }, - "cached" : false, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleAllSkipLimit.generated b/Documentation/Examples/RestSimpleAllSkipLimit.generated deleted file mode 100644 index 3db420d9066c..000000000000 --- a/Documentation/Examples/RestSimpleAllSkipLimit.generated +++ /dev/null @@ -1,42 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF -{ "collection": "products", "skip": 2, "limit" : 2 } -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105136", - "_id" : "products/105136", - "_rev" : "_YOn1XT6--_", - "Hello1" : "World1" - }, - { - "_key" : "105149", - "_id" : "products/105149", - "_rev" : "_YOn1XU---D", - "Hello5" : "World5" - } - ], - "hasMore" : false, - "count" : 2, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 4, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.00012993812561035156, - "peakMemoryUsage" : 
18328 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleAny.generated b/Documentation/Examples/RestSimpleAny.generated deleted file mode 100644 index 5a68ff6d2a7e..000000000000 --- a/Documentation/Examples/RestSimpleAny.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/any <<EOF -{ - "collection" : "products" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "document" : { - "_key" : "105168", - "_id" : "products/105168", - "_rev" : "_YOn1XU6--D", - "Hello2" : "World2" - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleByExample.generated b/Documentation/Examples/RestSimpleByExample.generated deleted file mode 100644 index 5652d237760f..000000000000 --- a/Documentation/Examples/RestSimpleByExample.generated +++ /dev/null @@ -1,70 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF -{ - "collection" : "products", - "example" : { - "i" : 1 - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105192", - "_id" : "products/105192", - "_rev" : "_YOn1XW---_", - "a" : { - "k" : 1, - "j" : 1 - }, - "i" : 1 - }, - { - "_key" : "105202", - "_id" : "products/105202", - "_rev" : "_YOn1XW---F", - "a" : { - "k" : 2, - "j" : 2 - }, - "i" : 1 - }, - { - "_key" : "105196", - "_id" : "products/105196", - "_rev" : "_YOn1XW---B", - "a" : { - "j" : 1 - }, - "i" : 1 - }, - { - "_key" : "105199", - "_id" : "products/105199", - "_rev" : "_YOn1XW---D", - "i" : 1 - } - ], - "hasMore" : false, - "count" : 4, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 4, - "scannedIndex" : 0, - "filtered" : 0, - "httpRequests" : 0, - "executionTime" : 0.0002498626708984375, - "peakMemoryUsage" : 68336 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleByExample2.generated b/Documentation/Examples/RestSimpleByExample2.generated deleted file mode 100644 index 859c9c5abff2..000000000000 --- a/Documentation/Examples/RestSimpleByExample2.generated +++ /dev/null @@ -1,54 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF -{ - "collection" : "products", - "example" : { - "a.j" : 1 - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105217", - "_id" : "products/105217", - "_rev" : "_YOn1XXC--_", - "a" : { - "k" : 1, - "j" : 1 - }, - "i" : 1 - }, - { - "_key" : "105221", - "_id" : "products/105221", - "_rev" : "_YOn1XXC--B", - "a" : { - "j" : 1 - }, - "i" : 1 - } - ], - "hasMore" : false, - "count" : 2, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 4, - "scannedIndex" : 0, - "filtered" : 2, - "httpRequests" : 0, - "executionTime" : 0.0002300739288330078, - "peakMemoryUsage" : 68616 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleByExample3.generated b/Documentation/Examples/RestSimpleByExample3.generated deleted file mode 100644 index 017497482b15..000000000000 
--- a/Documentation/Examples/RestSimpleByExample3.generated +++ /dev/null @@ -1,46 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105246", - "_id" : "products/105246", - "_rev" : "_YOn1XYC--D", - "a" : { - "j" : 1 - }, - "i" : 1 - } - ], - "hasMore" : false, - "count" : 1, - "cached" : false, - "extra" : { - "stats" : { - "writesExecuted" : 0, - "writesIgnored" : 0, - "scannedFull" : 4, - "scannedIndex" : 0, - "filtered" : 3, - "httpRequests" : 0, - "executionTime" : 0.00022554397583007812, - "peakMemoryUsage" : 68896 - }, - "warnings" : [ ] - }, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleFirstExample.generated b/Documentation/Examples/RestSimpleFirstExample.generated deleted file mode 100644 index c269e6ddf623..000000000000 --- a/Documentation/Examples/RestSimpleFirstExample.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF -{ - "collection" : "products", - "example" : { - "i" : 1 - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "document" : { - "_key" : "105277", - "_id" : "products/105277", - "_rev" : "_YOn1XZG--D", - "a" : { - "k" : 2, - "j" : 2 - }, - "i" : 1 - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleFirstExampleNotFound.generated b/Documentation/Examples/RestSimpleFirstExampleNotFound.generated deleted file mode 100644 index 05a07b5aa861..000000000000 --- a/Documentation/Examples/RestSimpleFirstExampleNotFound.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF -{ - "collection" : "products", - "example" : { - "l" : 1 - } -} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "code" : 404, - "errorNum" : 404, - "errorMessage" : "no match" -} diff --git a/Documentation/Examples/RestSimpleFulltext.generated b/Documentation/Examples/RestSimpleFulltext.generated deleted file mode 100644 index 5d901eb2f1a5..000000000000 --- a/Documentation/Examples/RestSimpleFulltext.generated +++ /dev/null @@ -1,32 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/fulltext <<EOF -{ - "collection" : "products", - "attribute" : "text", - "query" : "word" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105317", - "_id" : "products/105317", - "_rev" : "_YOn1XbK--_", - "text" : "this text contains word" - }, - { - "_key" : "105321", - "_id" : "products/105321", - "_rev" : "_YOn1XbK--B", - "text" : "this text also has a word" - } - ], - "hasMore" : false, - "count" : 2, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleLookup.generated b/Documentation/Examples/RestSimpleLookup.generated deleted file mode 100644 index 98cc9bfaec18..000000000000 --- a/Documentation/Examples/RestSimpleLookup.generated +++ /dev/null @@ -1,88 +0,0 @@ 
-shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF -{ - "keys" : [ - "test0", - "test1", - "test2", - "test3", - "test4", - "test5", - "test6", - "test7", - "test8", - "test9" - ], - "collection" : "test" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "documents" : [ - { - "_key" : "test0", - "_id" : "test/test0", - "_rev" : "_YOn1XdO--B", - "value" : 0 - }, - { - "_key" : "test1", - "_id" : "test/test1", - "_rev" : "_YOn1XdS--_", - "value" : 1 - }, - { - "_key" : "test2", - "_id" : "test/test2", - "_rev" : "_YOn1XdS--B", - "value" : 2 - }, - { - "_key" : "test3", - "_id" : "test/test3", - "_rev" : "_YOn1XdS--D", - "value" : 3 - }, - { - "_key" : "test4", - "_id" : "test/test4", - "_rev" : "_YOn1XdS--F", - "value" : 4 - }, - { - "_key" : "test5", - "_id" : "test/test5", - "_rev" : "_YOn1XdW--_", - "value" : 5 - }, - { - "_key" : "test6", - "_id" : "test/test6", - "_rev" : "_YOn1XdW--B", - "value" : 6 - }, - { - "_key" : "test7", - "_id" : "test/test7", - "_rev" : "_YOn1XdW--D", - "value" : 7 - }, - { - "_key" : "test8", - "_id" : "test/test8", - "_rev" : "_YOn1XdW--F", - "value" : 8 - }, - { - "_key" : "test9", - "_id" : "test/test9", - "_rev" : "_YOn1XdW--H", - "value" : 9 - } - ], - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleLookupNotFound.generated b/Documentation/Examples/RestSimpleLookupNotFound.generated deleted file mode 100644 index 10503b1733b3..000000000000 --- a/Documentation/Examples/RestSimpleLookupNotFound.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF -{ - "keys" : [ - "foo", - "bar", - "baz" - ], - "collection" : "test" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "documents" : [ ], - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleNear.generated b/Documentation/Examples/RestSimpleNear.generated deleted file mode 100644 index 7e9a3e123dac..000000000000 --- a/Documentation/Examples/RestSimpleNear.generated +++ /dev/null @@ -1,42 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF -{ - "collection" : "products", - "latitude" : 0, - "longitude" : 0, - "skip" : 1, - "limit" : 2 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105426", - "_id" : "products/105426", - "_rev" : "_YOn1XgW--D", - "name" : "Name/-0.002/", - "loc" : [ - -0.002, - 0 - ] - }, - { - "_key" : "105432", - "_id" : "products/105432", - "_rev" : "_YOn1XgW--H", - "name" : "Name/0.002/", - "loc" : [ - 0.002, - 0 - ] - } - ], - "hasMore" : false, - "count" : 2, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleNearDistance.generated b/Documentation/Examples/RestSimpleNearDistance.generated deleted file mode 100644 index 34ae489448b9..000000000000 --- a/Documentation/Examples/RestSimpleNearDistance.generated +++ /dev/null @@ -1,56 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF -{ - "collection" : "products", - "latitude" : 0, - "longitude" : 0, - "skip" : 1, - "limit" : 3, - "distance" : "distance" -} -EOF - 
-HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_id" : "products/105477", - "_key" : "105477", - "_rev" : "_YOn1Xie--_", - "loc" : [ - -0.002, - 0 - ], - "name" : "Name/-0.002/", - "distance" : 222.3898532891175 - }, - { - "_id" : "products/105483", - "_key" : "105483", - "_rev" : "_YOn1Xie--D", - "loc" : [ - 0.002, - 0 - ], - "name" : "Name/0.002/", - "distance" : 222.3898532891175 - }, - { - "_id" : "products/105486", - "_key" : "105486", - "_rev" : "_YOn1Xie--F", - "loc" : [ - 0.004, - 0 - ], - "name" : "Name/0.004/", - "distance" : 444.779706578235 - } - ], - "hasMore" : false, - "count" : 3, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleRange.generated b/Documentation/Examples/RestSimpleRange.generated deleted file mode 100644 index 0ead4b6ada7a..000000000000 --- a/Documentation/Examples/RestSimpleRange.generated +++ /dev/null @@ -1,33 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/range <<EOF -{ - "collection" : "products", - "attribute" : "i", - "left" : 2, - "right" : 4 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105519", - "_id" : "products/105519", - "_rev" : "_YOn1Xke--B", - "i" : 2 - }, - { - "_key" : "105522", - "_id" : "products/105522", - "_rev" : "_YOn1Xke--D", - "i" : 3 - } - ], - "hasMore" : false, - "count" : 2, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleRemove.generated b/Documentation/Examples/RestSimpleRemove.generated deleted file mode 100644 index fc6cb17aca43..000000000000 --- a/Documentation/Examples/RestSimpleRemove.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF -{ - "keys" : [ - "test0", - "test1", - "test2", - "test3", - "test4", - "test5", - "test6", - "test7", - "test8", - "test9" - ], - "collection" : "test" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "removed" : 10, - "ignored" : 0, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleRemoveByExample.generated b/Documentation/Examples/RestSimpleRemoveByExample.generated deleted file mode 100644 index 92050c390718..000000000000 --- a/Documentation/Examples/RestSimpleRemoveByExample.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deleted" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleRemoveByExample_1.generated b/Documentation/Examples/RestSimpleRemoveByExample_1.generated deleted file mode 100644 index bfd6decec48c..000000000000 --- a/Documentation/Examples/RestSimpleRemoveByExample_1.generated +++ /dev/null @@ -1,22 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - }, - "waitForSync" : true, - "limit" : 2 -} -EOF - -HTTP/1.1 OK 
-content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deleted" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleRemoveByExample_2.generated b/Documentation/Examples/RestSimpleRemoveByExample_2.generated deleted file mode 100644 index 0a5b4844ffb7..000000000000 --- a/Documentation/Examples/RestSimpleRemoveByExample_2.generated +++ /dev/null @@ -1,24 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - }, - "options" : { - "waitForSync" : true, - "limit" : 2 - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "deleted" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleRemoveNotFound.generated b/Documentation/Examples/RestSimpleRemoveNotFound.generated deleted file mode 100644 index 462f92c2df25..000000000000 --- a/Documentation/Examples/RestSimpleRemoveNotFound.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF -{ - "keys" : [ - "foo", - "bar", - "baz" - ], - "collection" : "test" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "removed" : 0, - "ignored" : 3, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleReplaceByExample.generated b/Documentation/Examples/RestSimpleReplaceByExample.generated deleted file mode 100644 index a92398861aaa..000000000000 --- a/Documentation/Examples/RestSimpleReplaceByExample.generated +++ /dev/null @@ -1,24 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - }, - "newValue" : { - "foo" : "bar" - }, - "limit" : 3 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "replaced" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleReplaceByExampleWaitForSync.generated b/Documentation/Examples/RestSimpleReplaceByExampleWaitForSync.generated deleted file mode 100644 index 6c8c2b608647..000000000000 --- a/Documentation/Examples/RestSimpleReplaceByExampleWaitForSync.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - }, - "newValue" : { - "foo" : "bar" - }, - "options" : { - "limit" : 3, - "waitForSync" : true - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "replaced" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleUpdateByExample.generated b/Documentation/Examples/RestSimpleUpdateByExample.generated deleted file mode 100644 index e1e920b55fca..000000000000 --- a/Documentation/Examples/RestSimpleUpdateByExample.generated +++ /dev/null @@ -1,26 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" 
: 1 - } - }, - "newValue" : { - "a" : { - "j" : 22 - } - }, - "limit" : 3 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "updated" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleUpdateByExample_1.generated b/Documentation/Examples/RestSimpleUpdateByExample_1.generated deleted file mode 100644 index 27d0f39478d8..000000000000 --- a/Documentation/Examples/RestSimpleUpdateByExample_1.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF -{ - "collection" : "products", - "example" : { - "a" : { - "j" : 1 - } - }, - "newValue" : { - "a" : { - "j" : 22 - } - }, - "options" : { - "limit" : 3, - "waitForSync" : true - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "updated" : 1, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestSimpleWithin.generated b/Documentation/Examples/RestSimpleWithin.generated deleted file mode 100644 index 1845fc91e23d..000000000000 --- a/Documentation/Examples/RestSimpleWithin.generated +++ /dev/null @@ -1,43 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF -{ - "collection" : "products", - "latitude" : 0, - "longitude" : 0, - "skip" : 1, - "limit" : 2, - "radius" : 500 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105839", - "_id" : "products/105839", - "_rev" : "_YOn1Xx6--D", - "name" : "Name/-0.002/", - "loc" : [ - -0.002, - 0 - ] - }, - { - "_key" : "105845", - "_id" : "products/105845", - "_rev" : "_YOn1Xy---_", - "name" : "Name/0.002/", - "loc" : [ - 0.002, - 0 - ] - } - ], - "hasMore" : false, - "count" : 2, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleWithinDistance.generated b/Documentation/Examples/RestSimpleWithinDistance.generated deleted file mode 100644 index 4352970ef3bb..000000000000 --- a/Documentation/Examples/RestSimpleWithinDistance.generated +++ /dev/null @@ -1,57 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF -{ - "collection" : "products", - "latitude" : 0, - "longitude" : 0, - "skip" : 1, - "limit" : 3, - "distance" : "distance", - "radius" : 300 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_id" : "products/105890", - "_key" : "105890", - "_rev" : "_YOn1X1G--H", - "loc" : [ - -0.002, - 0 - ], - "name" : "Name/-0.002/", - "distance" : 222.3898532891175 - }, - { - "_id" : "products/105896", - "_key" : "105896", - "_rev" : "_YOn1X1K--B", - "loc" : [ - 0.002, - 0 - ], - "name" : "Name/0.002/", - "distance" : 222.3898532891175 - }, - { - "_id" : "products/105899", - "_key" : "105899", - "_rev" : "_YOn1X1K--D", - "loc" : [ - 0.004, - 0 - ], - "name" : "Name/0.004/", - "distance" : 444.779706578235 - } - ], - "hasMore" : false, - "count" : 3, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestSimpleWithinRectangle.generated b/Documentation/Examples/RestSimpleWithinRectangle.generated deleted file mode 100644 index bb021d617c79..000000000000 --- a/Documentation/Examples/RestSimpleWithinRectangle.generated +++ /dev/null 
@@ -1,44 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/simple/within-rectangle <<EOF -{ - "collection" : "products", - "latitude1" : 0, - "longitude1" : 0, - "latitude2" : 0.2, - "longitude2" : 0.2, - "skip" : 1, - "limit" : 2 -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : [ - { - "_key" : "105956", - "_id" : "products/105956", - "_rev" : "_YOn1X3O--J", - "name" : "Name/0.008/", - "loc" : [ - 0.008, - 0 - ] - }, - { - "_key" : "105953", - "_id" : "products/105953", - "_rev" : "_YOn1X3O--H", - "name" : "Name/0.006/", - "loc" : [ - 0.006, - 0 - ] - } - ], - "hasMore" : false, - "count" : 2, - "error" : false, - "code" : 201 -} diff --git a/Documentation/Examples/RestTasksCreate.generated b/Documentation/Examples/RestTasksCreate.generated deleted file mode 100644 index 395cb340a7e9..000000000000 --- a/Documentation/Examples/RestTasksCreate.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/tasks/ <<EOF -{ - "name" : "SampleTask", - "command" : "(function(params) { require('@arangodb').print(params); })(params)", - "params" : { - "foo" : "bar", - "bar" : "foo" - }, - "period" : 2 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "id" : "105969", - "name" : "SampleTask", - "created" : 1550658791.3249488, - "type" : "periodic", - "period" : 2, - "offset" : 0, - "command" : "(function (params) { (function(params) { require('@arangodb').print(params); })(params) } )(params);", - "database" : "_system" -} -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks/105969 - diff --git a/Documentation/Examples/RestTasksDelete.generated b/Documentation/Examples/RestTasksDelete.generated deleted file mode 100644 index e4df98e6b1ac..000000000000 --- a/Documentation/Examples/RestTasksDelete.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks/SampleTask - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : "(non-representable type)" -} diff --git a/Documentation/Examples/RestTasksDeleteFail.generated b/Documentation/Examples/RestTasksDeleteFail.generated deleted file mode 100644 index 033b0757a935..000000000000 --- a/Documentation/Examples/RestTasksDeleteFail.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks/NoTaskWithThatName - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "task not found", - "code" : 404, - "errorNum" : 1852 -} diff --git a/Documentation/Examples/RestTasksListAll.generated b/Documentation/Examples/RestTasksListAll.generated deleted file mode 100644 index ee587441849c..000000000000 --- a/Documentation/Examples/RestTasksListAll.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - { - "id" : "55", - "name" : "user-defined task", - "created" : 1550658763.4988394, - "type" : "periodic", - "period" : 1, - 
"offset" : 0.000001, - "command" : "(function (params) { (function () {\n require('@arangodb/foxx/queues/manager').manage();\n })(params) } )(params);", - "database" : "_system" - } -] diff --git a/Documentation/Examples/RestTasksListNonExisting.generated b/Documentation/Examples/RestTasksListNonExisting.generated deleted file mode 100644 index ad984c879dde..000000000000 --- a/Documentation/Examples/RestTasksListNonExisting.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks/non-existing-task - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "task not found", - "code" : 404, - "errorNum" : 1852 -} diff --git a/Documentation/Examples/RestTasksListOne.generated b/Documentation/Examples/RestTasksListOne.generated deleted file mode 100644 index 9ff4aba0a2d4..000000000000 --- a/Documentation/Examples/RestTasksListOne.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/tasks <<EOF -{"id":"testTask","command":"console.log('Hello from task!');","offset":10000} -EOF - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/tasks/testTask - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "id" : "testTask", - "name" : "user-defined task", - "created" : 1550658791.3349102, - "type" : "timed", - "offset" : 10000, - "command" : "(function (params) { console.log('Hello from task!'); } )(params);", - "database" : "_system" -} diff --git a/Documentation/Examples/RestTasksPutWithId.generated b/Documentation/Examples/RestTasksPutWithId.generated deleted file mode 100644 index eab255045e36..000000000000 --- a/Documentation/Examples/RestTasksPutWithId.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/tasks/sampleTask <<EOF -{ - "id" : "SampleTask", - "name" : "SampleTask", - "command" : "(function(params) { require('@arangodb').print(params); })(params)", - "params" : { - "foo" : "bar", - "bar" : "foo" - }, - "period" : 2 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "id" : "sampleTask", - "name" : "SampleTask", - "created" : 1550658791.336974, - "type" : "periodic", - "period" : 2, - "offset" : 0, - "command" : "(function (params) { (function(params) { require('@arangodb').print(params); })(params) } )(params);", - "database" : "_system" -} diff --git a/Documentation/Examples/RestTransactionAbort.generated b/Documentation/Examples/RestTransactionAbort.generated deleted file mode 100644 index 874c51acf52c..000000000000 --- a/Documentation/Examples/RestTransactionAbort.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "read" : "products" - }, - "action" : "function () { throw 'doh!'; }" -} -EOF - -HTTP/1.1 Internal Server Error -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "doh!", - "code" : 500, - "errorNum" : 1650 -} diff --git a/Documentation/Examples/RestTransactionAbortInternal.generated b/Documentation/Examples/RestTransactionAbortInternal.generated deleted file mode 100644 index 
9145505bab09..000000000000 --- a/Documentation/Examples/RestTransactionAbortInternal.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "write" : "products" - }, - "action" : "function () {var db = require('@arangodb').db;db.products.save({ _key: 'abc'});db.products.save({ _key: 'abc'});}" -} -EOF - -HTTP/1.1 Conflict -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : " - in index 0 of type primary over '_key'; conflicting key: abc", - "code" : 409, - "errorNum" : 1210 -} diff --git a/Documentation/Examples/RestTransactionBeginAbort.generated b/Documentation/Examples/RestTransactionBeginAbort.generated deleted file mode 100644 index 67b03c3a9847..000000000000 --- a/Documentation/Examples/RestTransactionBeginAbort.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/transaction/106634 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "code" : 200, - "error" : false, - "result" : { - "id" : "106634", - "status" : "committed" - } -} diff --git a/Documentation/Examples/RestTransactionBeginCommit.generated b/Documentation/Examples/RestTransactionBeginCommit.generated deleted file mode 100644 index 2447d18fe129..000000000000 --- a/Documentation/Examples/RestTransactionBeginCommit.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/transaction/106646 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "code" : 200, - "error" : false, - "result" : { - "id" : "106646", - "status" : "aborted" - } -} diff --git a/Documentation/Examples/RestTransactionBeginNonExisting.generated b/Documentation/Examples/RestTransactionBeginNonExisting.generated deleted file mode 100644 index 627f408fcdf8..000000000000 --- a/Documentation/Examples/RestTransactionBeginNonExisting.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction/begin <<EOF -{ - "collections" : { - "read" : "products" - } -} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "code" : 404, - "error" : true, - "errorMessage" : "collection or view not found:products", - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestTransactionBeginSingle.generated b/Documentation/Examples/RestTransactionBeginSingle.generated deleted file mode 100644 index 66d48ab52816..000000000000 --- a/Documentation/Examples/RestTransactionBeginSingle.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction/begin <<EOF -{ - "collections" : { - "write" : "products" - } -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "code" : 201, - "error" : false, - "result" : { - "id" : "106660", - "status" : "running" - } -} diff --git a/Documentation/Examples/RestTransactionGet.generated b/Documentation/Examples/RestTransactionGet.generated deleted file mode 100644 index 5ad1d009c936..000000000000 --- a/Documentation/Examples/RestTransactionGet.generated +++ /dev/null @@ 
-1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/transaction/106672 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "code" : 200, - "error" : false, - "result" : { - "id" : "106672", - "status" : "running" - } -} diff --git a/Documentation/Examples/RestTransactionMulti.generated b/Documentation/Examples/RestTransactionMulti.generated deleted file mode 100644 index 61a1917c60ac..000000000000 --- a/Documentation/Examples/RestTransactionMulti.generated +++ /dev/null @@ -1,21 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "write" : [ - "products", - "materials" - ] - }, - "action" : "function () {var db = require('@arangodb').db;db.products.save({});db.materials.save({});return 'worked!';}" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : "worked!" -} diff --git a/Documentation/Examples/RestTransactionNonExisting.generated b/Documentation/Examples/RestTransactionNonExisting.generated deleted file mode 100644 index 7fb7321aae78..000000000000 --- a/Documentation/Examples/RestTransactionNonExisting.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "read" : "products" - }, - "action" : "function () { return true; }" -} -EOF - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "collection or view not found: products", - "code" : 404, - "errorNum" : 1203 -} diff --git a/Documentation/Examples/RestTransactionSingle.generated b/Documentation/Examples/RestTransactionSingle.generated deleted file mode 100644 index 21b14b833465..000000000000 --- a/Documentation/Examples/RestTransactionSingle.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "write" : "products" - }, - "action" : "function () { var db = require('@arangodb').db; db.products.save({}); return db.products.count(); }" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : 1 -} diff --git a/Documentation/Examples/RestTraversalAny.generated b/Documentation/Examples/RestTraversalAny.generated deleted file mode 100644 index 289a753cbeb4..000000000000 --- a/Documentation/Examples/RestTraversalAny.generated +++ /dev/null @@ -1,275 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "uniqueness" : { - "vertices" : "none", - "edges" : "global" - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1Y_G--D", - "name" : 
"Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1Y_G--F", - "name" : "Dave" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Y_G--H", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106096", - "_id" : "knows/106096", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--_", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106096", - "_id" : "knows/106096", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--_", - "vertex" : "alice" - }, - { - "_key" : "106100", - "_id" : "knows/106100", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1Y_K--B", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1Y_G--D", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106096", - "_id" : "knows/106096", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--_", - "vertex" : "alice" - }, - { - "_key" : "106103", - "_id" : "knows/106103", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1Y_K--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1Y_G--F", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106096", - "_id" : "knows/106096", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--_", - "vertex" : "alice" - }, - { - "_key" : "106109", - "_id" : "knows/106109", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--H", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Y_G--H", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106096", - "_id" : "knows/106096", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--_", - "vertex" : "alice" - }, - { - "_key" : "106109", - "_id" : "knows/106109", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1Y_K--H", - "vertex" : "eve" - }, - { - "_key" : "106106", - "_id" : "knows/106106", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1Y_K--F", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Y_G--B", - "name" : "Bob" - }, - { - "_key" : 
"eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Y_G--H", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Y_G--_", - "name" : "Alice" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalBackwardItemOrder.generated b/Documentation/Examples/RestTraversalBackwardItemOrder.generated deleted file mode 100644 index ef8e0eba6d2e..000000000000 --- a/Documentation/Examples/RestTraversalBackwardItemOrder.generated +++ /dev/null @@ -1,530 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "itemOrder" : "backward" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YBu--A", - "name" : "Dave" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YBu---", - "name" : "Charlie" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YBu--A", - "name" : "Dave" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YBu---", - "name" : "Charlie" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - 
"vertex" : "eve" - }, - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - "vertex" : "eve" - }, - { - "_key" : "106187", - "_id" : "knows/106187", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YBy--_", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YBu--A", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - "vertex" : "eve" - }, - { - "_key" : "106184", - "_id" : "knows/106184", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YBu--G", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YBu---", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", 
- "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - }, - { - "_key" : "106193", - "_id" : "knows/106193", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YBy--D", - "vertex" : "eve" - }, - { - "_key" : "106190", - "_id" : "knows/106190", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YBy--B", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YBu--C", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - }, - { - "_key" : "106187", - "_id" : "knows/106187", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YBy--_", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YBu--A", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106180", - "_id" : "knows/106180", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YBu--E", - "vertex" : "alice" - }, - { - "_key" : "106184", - "_id" : "knows/106184", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YBu--G", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YBq--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YBq--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YBu---", - "name" : "Charlie" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalDepthFirst.generated b/Documentation/Examples/RestTraversalDepthFirst.generated deleted file mode 100644 index 6bdb723c8f5f..000000000000 --- a/Documentation/Examples/RestTraversalDepthFirst.generated +++ /dev/null @@ -1,530 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "strategy" : "depthfirst" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YEe--D", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YEe--F", - "name" : "Dave" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - 
"_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YEe--D", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YEe--F", - "name" : "Dave" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - }, - { - "_key" : "106283", - "_id" : "knows/106283", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YEi--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YEe--D", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - }, - { - "_key" : "106286", - "_id" : "knows/106286", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YEi--F", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YEe--F", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - }, - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "bob", - 
"_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - }, - { - "_key" : "106283", - "_id" : "knows/106283", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YEi--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YEe--D", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - }, - { - "_key" : "106286", - "_id" : "knows/106286", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YEi--F", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YEe--F", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106289", - "_id" : "knows/106289", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YEi--H", - "vertex" : "eve" - }, - { - "_key" : "106292", - "_id" : "knows/106292", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YEi--J", - "vertex" : "eve" - }, - { - "_key" : "106279", - "_id" : "knows/106279", - "_from" : "persons/alice", - "_to" : 
"persons/bob", - "_rev" : "_YOn1YEi--B", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YEi--_", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YEe--B", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YEe--_", - "name" : "Alice" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalEdgeUniqueness.generated b/Documentation/Examples/RestTraversalEdgeUniqueness.generated deleted file mode 100644 index 651c99edf0f6..000000000000 --- a/Documentation/Examples/RestTraversalEdgeUniqueness.generated +++ /dev/null @@ -1,275 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "uniqueness" : { - "vertices" : "none", - "edges" : "global" - } -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YHK--B", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YHK--D", - "name" : "Dave" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YHK--F", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106378", - "_id" : "knows/106378", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--_", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106378", - "_id" : "knows/106378", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--_", - "vertex" : "alice" - }, - { - "_key" : "106382", - "_id" : "knows/106382", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YHO--B", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YHK--B", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106378", - "_id" : "knows/106378", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--_", - "vertex" : "alice" - }, - { - "_key" : "106385", - "_id" : "knows/106385", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YHO--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - 
"_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YHK--D", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106378", - "_id" : "knows/106378", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--_", - "vertex" : "alice" - }, - { - "_key" : "106391", - "_id" : "knows/106391", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--H", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YHK--F", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106378", - "_id" : "knows/106378", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--_", - "vertex" : "alice" - }, - { - "_key" : "106391", - "_id" : "knows/106391", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YHO--H", - "vertex" : "eve" - }, - { - "_key" : "106388", - "_id" : "knows/106388", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YHO--F", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YHK--_", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YHK--F", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YHG--_", - "name" : "Alice" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalFilterExclude.generated b/Documentation/Examples/RestTraversalFilterExclude.generated deleted file mode 100644 index 4bfec80a98af..000000000000 --- a/Documentation/Examples/RestTraversalFilterExclude.generated +++ /dev/null @@ -1,88 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound", - "filter" : "if (vertex.name === \"Bob\" || vertex.name === \"Charlie\") { return \"exclude\";}return;" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YJi--_", - "name" : "Alice" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YJm--B", - "name" : "Dave" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YJi--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106462", - "_id" : "knows/106462", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YJm--F", - "vertex" : "alice" - }, - { - "_key" : "106469", - "_id" : "knows/106469", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YJm--J", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YJi--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YJi--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YJm--B", 
- "name" : "Dave" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalFilterPrune.generated b/Documentation/Examples/RestTraversalFilterPrune.generated deleted file mode 100644 index a2696132b34a..000000000000 --- a/Documentation/Examples/RestTraversalFilterPrune.generated +++ /dev/null @@ -1,74 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound", - "filter" : "if (vertex.name === \"Bob\") {return \"prune\";}return;" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YLu--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YLu--B", - "name" : "Bob" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YLu--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106535", - "_id" : "knows/106535", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YLy--B", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YLu--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YLu--B", - "name" : "Bob" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalInbound.generated b/Documentation/Examples/RestTraversalInbound.generated deleted file mode 100644 index bae487fe21ff..000000000000 --- a/Documentation/Examples/RestTraversalInbound.generated +++ /dev/null @@ -1,73 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "inbound" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YN2--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YN6--D", - "name" : "Eve" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YN2--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106613", - "_id" : "knows/106613", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YN6--L", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YN2--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YN6--D", - "name" : "Eve" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalMaxDepth.generated b/Documentation/Examples/RestTraversalMaxDepth.generated deleted file mode 100644 index e57f10083fbc..000000000000 --- a/Documentation/Examples/RestTraversalMaxDepth.generated +++ /dev/null @@ -1,74 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - 
"direction" : "outbound", - "maxDepth" : 1 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YQ---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YQ---B", - "name" : "Bob" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YQ---_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106672", - "_id" : "knows/106672", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YQC--_", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YQ---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YQ---B", - "name" : "Bob" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalMaxIterations.generated b/Documentation/Examples/RestTraversalMaxIterations.generated deleted file mode 100644 index 85a9176c08b5..000000000000 --- a/Documentation/Examples/RestTraversalMaxIterations.generated +++ /dev/null @@ -1,23 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "uniqueness" : { - "vertices" : "none", - "edges" : "none" - }, - "maxIterations" : 5 -} -EOF - -HTTP/1.1 Internal Server Error -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "code" : 500, - "errorNum" : 1909, - "errorMessage" : "too many iterations - try increasing the value of 'maxIterations'" -} diff --git a/Documentation/Examples/RestTraversalMinDepth.generated b/Documentation/Examples/RestTraversalMinDepth.generated deleted file mode 100644 index 1efff4c0706d..000000000000 --- a/Documentation/Examples/RestTraversalMinDepth.generated +++ /dev/null @@ -1,117 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound", - "minDepth" : 2 -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YUO--D", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YUO--F", - "name" : "Dave" - } - ], - "paths" : [ - { - "edges" : [ - { - "_key" : "106842", - "_id" : "knows/106842", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YUS--B", - "vertex" : "alice" - }, - { - "_key" : "106846", - "_id" : "knows/106846", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YUS--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YUO--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YUO--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YUO--D", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106842", - "_id" : "knows/106842", - "_from" : "persons/alice", - "_to" : 
"persons/bob", - "_rev" : "_YOn1YUS--B", - "vertex" : "alice" - }, - { - "_key" : "106849", - "_id" : "knows/106849", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YUS--F", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YUO--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YUO--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YUO--F", - "name" : "Dave" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalOutbound.generated b/Documentation/Examples/RestTraversalOutbound.generated deleted file mode 100644 index 3b917a70e65f..000000000000 --- a/Documentation/Examples/RestTraversalOutbound.generated +++ /dev/null @@ -1,165 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YXy--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YXy--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YXy--D", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YXy--F", - "name" : "Dave" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YXy--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106915", - "_id" : "knows/106915", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YX2--B", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YXy--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YXy--B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106915", - "_id" : "knows/106915", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YX2--B", - "vertex" : "alice" - }, - { - "_key" : "106919", - "_id" : "knows/106919", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YX2--D", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YXy--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YXy--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YXy--D", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106915", - "_id" : "knows/106915", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YX2--B", - "vertex" : "alice" - }, - { - "_key" : "106922", - "_id" : "knows/106922", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YX2--F", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1YXy--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1YXy--B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YXy--F", - "name" : "Dave" - } - ] - } - ] - } - }, - "error" : 
false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalPostorder.generated b/Documentation/Examples/RestTraversalPostorder.generated deleted file mode 100644 index e9d093f255da..000000000000 --- a/Documentation/Examples/RestTraversalPostorder.generated +++ /dev/null @@ -1,530 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "any", - "order" : "postorder" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YaC--_", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YaC--B", - "name" : "Dave" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YaC--_", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YaC--B", - "name" : "Dave" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - } - ], - "paths" : [ - { - "edges" : [ - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - }, - { - "_key" : "106992", - "_id" : "knows/106992", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YaC--H", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YaC--_", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - }, - { - "_key" : "106995", - "_id" : "knows/106995", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YaC--J", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YaC--B", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - }, - { - "_key" : "107001", - "_id" : "knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - }, - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : 
"persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - }, - { - "_key" : "107001", - "_id" : "knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - }, - { - "_key" : "107001", - "_id" : "knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - }, - { - "_key" : "106992", - "_id" : "knows/106992", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1YaC--H", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1YaC--_", - "name" : "Charlie" - } - ] - }, - { - "edges" : [ - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - }, - { - "_key" : "107001", - "_id" : "knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - }, - { - "_key" : "106995", - "_id" : "knows/106995", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1YaC--J", - "vertex" : "bob" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1YaC--B", - "name" : "Dave" - } - ] - }, - { - "edges" : [ - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - }, - { - "_key" : "107001", - "_id" : 
"knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - }, - { - "_key" : "106988", - "_id" : "knows/106988", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1YaC--F", - "vertex" : "alice" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - }, - { - "_key" : "107001", - "_id" : "knows/107001", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1YaG--B", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ya---B", - "name" : "Bob" - } - ] - }, - { - "edges" : [ - { - "_key" : "106998", - "_id" : "knows/106998", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1YaG--_", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1YaC--D", - "name" : "Eve" - } - ] - }, - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Ya---_", - "name" : "Alice" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalVisitorCountAndList.generated b/Documentation/Examples/RestTraversalVisitorCountAndList.generated deleted file mode 100644 index daa8a64d7225..000000000000 --- a/Documentation/Examples/RestTraversalVisitorCountAndList.generated +++ /dev/null @@ -1,47 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound", - "init" : "result.visited = 0; result.myVertices = [ ];", - "visitor" : "result.visited++; result.myVertices.push(vertex);" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : 4, - "myVertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Yci--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Ycm--_", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1Ycm--B", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1Ycm--D", - "name" : "Dave" - } - ] - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalVisitorExpander.generated b/Documentation/Examples/RestTraversalVisitorExpander.generated deleted file mode 100644 index 426872100f37..000000000000 --- a/Documentation/Examples/RestTraversalVisitorExpander.generated +++ /dev/null @@ -1,119 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' 
--data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "expander" : "var connections = [ ];if (vertex.name === \"Alice\") {config.datasource.getInEdges(vertex).forEach(function (e) {connections.push({ vertex: require(\"internal\").db._document(e._from), edge: e});});}if (vertex.name === \"Eve\") {config.datasource.getOutEdges(vertex).forEach(function (e) {connections.push({vertex: require(\"internal\").db._document(e._to), edge: e});});}return connections;" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Yeq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Yeu--B", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Yeq--B", - "name" : "Bob" - } - ], - "paths" : [ - { - "edges" : [ ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Yeq--_", - "name" : "Alice" - } - ] - }, - { - "edges" : [ - { - "_key" : "107170", - "_id" : "knows/107170", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1Yeu--J", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Yeq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Yeu--B", - "name" : "Eve" - } - ] - }, - { - "edges" : [ - { - "_key" : "107170", - "_id" : "knows/107170", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1Yeu--J", - "vertex" : "eve" - }, - { - "_key" : "107173", - "_id" : "knows/107173", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1Yey--_", - "vertex" : "eve" - } - ], - "vertices" : [ - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1Yeq--_", - "name" : "Alice" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1Yeu--B", - "name" : "Eve" - }, - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1Yeq--B", - "name" : "Bob" - } - ] - } - ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestTraversalVisitorFunc.generated b/Documentation/Examples/RestTraversalVisitorFunc.generated deleted file mode 100644 index 97d409485a54..000000000000 --- a/Documentation/Examples/RestTraversalVisitorFunc.generated +++ /dev/null @@ -1,28 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF -{ - "startVertex" : "persons/alice", - "graphName" : "knows_graph", - "direction" : "outbound", - "visitor" : "result.visited.vertices.push(vertex._id);" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : { - "visited" : { - "vertices" : [ - "persons/alice", - "persons/bob", - "persons/charlie", - "persons/dave" - ], - "paths" : [ ] - } - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestUpdateUser.generated b/Documentation/Examples/RestUpdateUser.generated deleted file mode 100644 index 67ae953fb25d..000000000000 --- a/Documentation/Examples/RestUpdateUser.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp <<EOF -{ - "passwd" : "secure" -} -EOF - -HTTP/1.1 OK 
-content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "user" : "admin@myapp", - "active" : true, - "extra" : { - }, - "error" : false, - "code" : 200 -} diff --git a/Documentation/Examples/RestVersion.generated b/Documentation/Examples/RestVersion.generated deleted file mode 100644 index d0377387d96c..000000000000 --- a/Documentation/Examples/RestVersion.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "server" : "arango", - "version" : "3.5.0-devel", - "license" : "enterprise" -} diff --git a/Documentation/Examples/RestVersionDetails.generated b/Documentation/Examples/RestVersionDetails.generated deleted file mode 100644 index 42b1d5393b8d..000000000000 --- a/Documentation/Examples/RestVersionDetails.generated +++ /dev/null @@ -1,55 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/version?details=true - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "server" : "arango", - "version" : "3.5.0-devel", - "license" : "enterprise", - "details" : { - "architecture" : "64bit", - "arm" : "false", - "asan" : "false", - "asm-crc32" : "true", - "assertions" : "true", - "avx2" : "true", - "boost-version" : "1.69.0", - "build-date" : "2019-02-20 08:48:55", - "build-repository" : "heads/bug-fix/fix-query-cache-shutdown-0-gd977f1786a", - "compiler" : "gcc [8.2.0]", - "cplusplus" : "201402", - "curl-version" : "libcurl/7.63.0 OpenSSL/1.1.0g", - "debug" : "false", - "endianness" : "little", - "enterprise-version" : "enterprise", - "failure-tests" : "true", - "fd-client-event-handler" : "poll", - "fd-setsize" : "1024", - "full-version-string" : "ArangoDB 3.5.0-devel enterprise [linux] 64bit maintainer mode, using jemalloc, build heads/bug-fix/fix-query-cache-shutdown-0-gd977f1786a, VPack 0.1.33, RocksDB 5.18.0, ICU 58.1, V8 7.1.302.28, OpenSSL 1.1.0g 2 Nov 2017", - "icu-version" : "58.1", - "iresearch-version" : "1.0.0.0", - "jemalloc" : "true", - "license" : "enterprise", - "maintainer-mode" : "true", - "ndebug" : "true", - "openssl-version-compile-time" : "OpenSSL 1.1.0g 2 Nov 2017", - "openssl-version-run-time" : "OpenSSL 1.1.0g 2 Nov 2017", - "optimization-flags" : "-march=haswell -msse2 -msse3 -mssse3 -msse4.1 -msse4.2 -mavx -mfma -mbmi2 -mavx2 -mno-sse4a -mno-xop -mno-fma4 -mno-avx512f -mno-avx512vl -mno-avx512pf -mno-avx512er -mno-avx512cd -mno-avx512dq -mno-avx512bw -mno-avx512ifma -mno-avx512vbmi", - "platform" : "linux", - "reactor-type" : "epoll", - "rocksdb-version" : "5.18.0", - "server-version" : "3.5.0-devel", - "sizeof int" : "4", - "sizeof long" : "8", - "sizeof void*" : "8", - "sse42" : "true", - "unaligned-access" : "true", - "v8-version" : "7.1.302.28", - "vpack-version" : "0.1.33", - "zlib-version" : "1.2.11", - "mode" : "server", - "host" : "c54ebb83e5eb4257b9e0f7201ba87ded" - } -} diff --git a/Documentation/Examples/RestViewDeleteViewIdentifier.generated b/Documentation/Examples/RestViewDeleteViewIdentifier.generated deleted file mode 100644 index 6bb8c0c7c2e6..000000000000 --- a/Documentation/Examples/RestViewDeleteViewIdentifier.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/view/107285 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ 
- "error" : false, - "code" : 200, - "result" : true -} diff --git a/Documentation/Examples/RestViewDeleteViewName.generated b/Documentation/Examples/RestViewDeleteViewName.generated deleted file mode 100644 index 3738a2c62b76..000000000000 --- a/Documentation/Examples/RestViewDeleteViewName.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/view/testView - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : true -} diff --git a/Documentation/Examples/RestViewGetAllViews.generated b/Documentation/Examples/RestViewGetAllViews.generated deleted file mode 100644 index 38b440680825..000000000000 --- a/Documentation/Examples/RestViewGetAllViews.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/view - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "result" : [ - { - "globallyUniqueId" : "h8B2B671BCFD0/102", - "id" : "102", - "name" : "demoView", - "type" : "arangosearch" - } - ] -} diff --git a/Documentation/Examples/RestViewGetViewIdentifier.generated b/Documentation/Examples/RestViewGetViewIdentifier.generated deleted file mode 100644 index 20c9a9a0ec7a..000000000000 --- a/Documentation/Examples/RestViewGetViewIdentifier.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/view/107297 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : "arangosearch", - "name" : "testView", - "globallyUniqueId" : "h8B2B671BCFD0/107297", - "id" : "107297" -} diff --git a/Documentation/Examples/RestViewGetViewName.generated b/Documentation/Examples/RestViewGetViewName.generated deleted file mode 100644 index d81032fd46d6..000000000000 --- a/Documentation/Examples/RestViewGetViewName.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/view/testView - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : "arangosearch", - "name" : "testView", - "globallyUniqueId" : "h8B2B671BCFD0/107303", - "id" : "107303" -} diff --git a/Documentation/Examples/RestViewGetViewPropertiesIdentifier.generated b/Documentation/Examples/RestViewGetViewPropertiesIdentifier.generated deleted file mode 100644 index f3cdb06c62a2..000000000000 --- a/Documentation/Examples/RestViewGetViewPropertiesIdentifier.generated +++ /dev/null @@ -1,26 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/view/107309/properties - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "writebufferIdle" : 64, - "writebufferActive" : 0, - "type" : "arangosearch", - "writebufferSizeMax" : 33554432, - "name" : "products", - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "globallyUniqueId" : "h8B2B671BCFD0/107309", - "cleanupIntervalStep" : 10, - "id" : "107309", - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} diff --git a/Documentation/Examples/RestViewGetViewPropertiesName.generated 
b/Documentation/Examples/RestViewGetViewPropertiesName.generated deleted file mode 100644 index d64ce89f6e4b..000000000000 --- a/Documentation/Examples/RestViewGetViewPropertiesName.generated +++ /dev/null @@ -1,26 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/view/products/properties - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "writebufferIdle" : 64, - "writebufferActive" : 0, - "type" : "arangosearch", - "writebufferSizeMax" : 33554432, - "name" : "products", - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "globallyUniqueId" : "h8B2B671BCFD0/107316", - "cleanupIntervalStep" : 10, - "id" : "107316", - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} diff --git a/Documentation/Examples/RestViewPatchProperties.generated b/Documentation/Examples/RestViewPatchProperties.generated deleted file mode 100644 index d44203ef39ee..000000000000 --- a/Documentation/Examples/RestViewPatchProperties.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X PATCH --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view/products/properties <<EOF -{ -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/107323", - "id" : "107323", - "name" : "products", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestViewPostView.generated b/Documentation/Examples/RestViewPostView.generated deleted file mode 100644 index 6f2a6b661e0f..000000000000 --- a/Documentation/Examples/RestViewPostView.generated +++ /dev/null @@ -1,29 +0,0 @@ -shell> curl -X POST --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view <<EOF -{ - "name" : "testViewBasics", - "type" : "arangosearch" -} -EOF - -HTTP/1.1 Created -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/107331", - "id" : "107331", - "name" : "testViewBasics", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestViewPutProperties.generated b/Documentation/Examples/RestViewPutProperties.generated deleted file mode 100644 index 81e1ab1bf748..000000000000 --- a/Documentation/Examples/RestViewPutProperties.generated +++ /dev/null @@ -1,27 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view/products/properties <<EOF -{ -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "globallyUniqueId" : "h8B2B671BCFD0/107337", - "id" : "107337", - "name" : "products", - "type" : "arangosearch", - "cleanupIntervalStep" : 10, - "commitIntervalMsec" : 60000, - 
"consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/RestViewPutViewRename.generated b/Documentation/Examples/RestViewPutViewRename.generated deleted file mode 100644 index c09ac2cf4e3f..000000000000 --- a/Documentation/Examples/RestViewPutViewRename.generated +++ /dev/null @@ -1,18 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/view/products1/rename <<EOF -{ - "name" : "viewNewName" -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : false, - "code" : 200, - "type" : "arangosearch", - "name" : "viewNewName", - "globallyUniqueId" : "h8B2B671BCFD0/107345", - "id" : "107345" -} diff --git a/Documentation/Examples/RestWalAccessFirstTick.generated b/Documentation/Examples/RestWalAccessFirstTick.generated deleted file mode 100644 index 49591f1590ab..000000000000 --- a/Documentation/Examples/RestWalAccessFirstTick.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/wal/lastTick - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "time" : "2019-02-20T10:33:12Z", - "tick" : "107352", - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - } -} diff --git a/Documentation/Examples/RestWalAccessTailingBufferLimit.generated b/Documentation/Examples/RestWalAccessTailingBufferLimit.generated deleted file mode 100644 index 9637f8915de0..000000000000 --- a/Documentation/Examples/RestWalAccessTailingBufferLimit.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/wal/tail?from=107352&chunkSize=400 - -HTTP/1.1 OK -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-checkmore: true -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 107370 -x-arango-replication-lastscanned: 107382 -x-arango-replication-lasttick: 107382 -x-content-type-options: nosniff - -{ - "tick" : "107370", - "type" : 2001, - "db" : "_system", - "cuid" : "h8B2B671BCFD0/107355" -} diff --git a/Documentation/Examples/RestWalAccessTailingEmpty.generated b/Documentation/Examples/RestWalAccessTailingEmpty.generated deleted file mode 100644 index b23bfab79796..000000000000 --- a/Documentation/Examples/RestWalAccessTailingEmpty.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/wal/tail?from=107382 - -HTTP/1.1 No Content -content-type: application/x-arango-dump; charset=utf-8 -x-arango-replication-checkmore: false -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 0 -x-arango-replication-lastscanned: 107382 -x-arango-replication-lasttick: 107382 -x-content-type-options: nosniff - diff --git a/Documentation/Examples/RestWalAccessTailingSome.generated b/Documentation/Examples/RestWalAccessTailingSome.generated deleted file mode 100644 index bacaca86a8d7..000000000000 --- a/Documentation/Examples/RestWalAccessTailingSome.generated +++ /dev/null @@ -1,17 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/wal/tail?from=107382 - -HTTP/1.1 OK -content-type: application/x-arango-dump; 
charset=utf-8 -x-arango-replication-checkmore: true -x-arango-replication-frompresent: true -x-arango-replication-lastincluded: 107400 -x-arango-replication-lastscanned: 107403 -x-arango-replication-lasttick: 107403 -x-content-type-options: nosniff - -{ - "tick" : "107400", - "type" : 2001, - "db" : "_system", - "cuid" : "h8B2B671BCFD0/107385" -}↩ diff --git a/Documentation/Examples/RestWalAccessTickRange.generated b/Documentation/Examples/RestWalAccessTickRange.generated deleted file mode 100644 index 2e179d20fe62..000000000000 --- a/Documentation/Examples/RestWalAccessTickRange.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/wal/range - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "time" : "2019-02-20T10:33:16Z", - "tickMin" : "5", - "tickMax" : "107403", - "server" : { - "version" : "3.5.0-devel", - "serverId" : "153018529730512" - } -} diff --git a/Documentation/Examples/RestWalPropertiesGet_mmfiles.generated b/Documentation/Examples/RestWalPropertiesGet_mmfiles.generated deleted file mode 100644 index 3b77942db0a9..000000000000 --- a/Documentation/Examples/RestWalPropertiesGet_mmfiles.generated +++ /dev/null @@ -1,15 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_admin/wal/properties - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "allowOversizeEntries" : true, - "logfileSize" : 33554432, - "historicLogfiles" : 10, - "reserveLogfiles" : 3, - "syncInterval" : 100, - "throttleWait" : 15000, - "throttleWhenPending" : 0 -} diff --git a/Documentation/Examples/RestWalPropertiesPut_mmfiles.generated b/Documentation/Examples/RestWalPropertiesPut_mmfiles.generated deleted file mode 100644 index c07f5501a94a..000000000000 --- a/Documentation/Examples/RestWalPropertiesPut_mmfiles.generated +++ /dev/null @@ -1,20 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_admin/wal/properties <<EOF -{ - "logfileSize" : 33554432, - "allowOversizeEntries" : true -} -EOF - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "allowOversizeEntries" : true, - "logfileSize" : 33554432, - "historicLogfiles" : 10, - "reserveLogfiles" : 3, - "syncInterval" : 100, - "throttleWait" : 15000, - "throttleWhenPending" : 0 -} diff --git a/Documentation/Examples/RestWalTransactionsGet_mmfiles.generated b/Documentation/Examples/RestWalTransactionsGet_mmfiles.generated deleted file mode 100644 index 11d8f37bb3d3..000000000000 --- a/Documentation/Examples/RestWalTransactionsGet_mmfiles.generated +++ /dev/null @@ -1,11 +0,0 @@ -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_admin/wal/transactions - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "runningTransactions" : 3, - "minLastCollected" : 85, - "minLastSealed" : null -} diff --git a/Documentation/Examples/USER_02_saveUser.generated b/Documentation/Examples/USER_02_saveUser.generated deleted file mode 100644 index 3b88c819b14f..000000000000 --- a/Documentation/Examples/USER_02_saveUser.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> require('@arangodb/users').save('my-user', 'my-secret-password'); -{ - "user" : "my-user", - "active" : true, - "extra" : { - }, - "code" : 201 -} diff --git a/Documentation/Examples/USER_03_reloadUser.generated 
b/Documentation/Examples/USER_03_reloadUser.generated deleted file mode 100644 index 72d7024bb216..000000000000 --- a/Documentation/Examples/USER_03_reloadUser.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> require("@arangodb/users").reload(); diff --git a/Documentation/Examples/USER_03_replaceUser.generated b/Documentation/Examples/USER_03_replaceUser.generated deleted file mode 100644 index e101b1a084c0..000000000000 --- a/Documentation/Examples/USER_03_replaceUser.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> require("@arangodb/users").replace("my-user", "my-changed-password"); -{ - "user" : "my-user", - "active" : true, - "extra" : { - }, - "code" : 200 -} diff --git a/Documentation/Examples/USER_04_documentUser.generated b/Documentation/Examples/USER_04_documentUser.generated deleted file mode 100644 index 28fabd9ac724..000000000000 --- a/Documentation/Examples/USER_04_documentUser.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> require("@arangodb/users").document("my-user"); -{ - "user" : "my-user", - "active" : true, - "extra" : { - }, - "code" : 200 -} diff --git a/Documentation/Examples/USER_04_updateUser.generated b/Documentation/Examples/USER_04_updateUser.generated deleted file mode 100644 index 863687b0afee..000000000000 --- a/Documentation/Examples/USER_04_updateUser.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> require("@arangodb/users").update("my-user", "my-secret-password"); -{ - "user" : "my-user", - "active" : true, - "extra" : { - }, - "code" : 200 -} diff --git a/Documentation/Examples/USER_05_isValidUser.generated b/Documentation/Examples/USER_05_isValidUser.generated deleted file mode 100644 index 84e3ecc19434..000000000000 --- a/Documentation/Examples/USER_05_isValidUser.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb/users").isValid("my-user", "my-secret-password"); -true diff --git a/Documentation/Examples/USER_05_permission.generated b/Documentation/Examples/USER_05_permission.generated deleted file mode 100644 index 3c045562be0a..000000000000 --- a/Documentation/Examples/USER_05_permission.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb/users").permission("my-user", "testdb"); -rw diff --git a/Documentation/Examples/USER_06_AllUsers.generated b/Documentation/Examples/USER_06_AllUsers.generated deleted file mode 100644 index 092efde07f74..000000000000 --- a/Documentation/Examples/USER_06_AllUsers.generated +++ /dev/null @@ -1,27 +0,0 @@ -arangosh> require("@arangodb/users").all(); -[ - { - "user" : "tester", - "active" : false, - "extra" : { - } - }, - { - "user" : "my-user", - "active" : true, - "extra" : { - } - }, - { - "user" : "admin", - "active" : true, - "extra" : { - } - }, - { - "user" : "root", - "active" : true, - "extra" : { - } - } -] diff --git a/Documentation/Examples/USER_07_removeUser.generated b/Documentation/Examples/USER_07_removeUser.generated deleted file mode 100644 index 001d26fa9be2..000000000000 --- a/Documentation/Examples/USER_07_removeUser.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> require("@arangodb/users").remove("my-user"); diff --git a/Documentation/Examples/WalFlush.generated b/Documentation/Examples/WalFlush.generated deleted file mode 100644 index 2332e2500a33..000000000000 --- a/Documentation/Examples/WalFlush.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> require("internal").wal.flush(); diff --git a/Documentation/Examples/WalPropertiesGet_mmfiles.generated b/Documentation/Examples/WalPropertiesGet_mmfiles.generated deleted file mode 100644 index 
31a81b8c9933..000000000000 --- a/Documentation/Examples/WalPropertiesGet_mmfiles.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> require("internal").wal.properties(); -{ -} diff --git a/Documentation/Examples/WalPropertiesSet_mmfiles.generated b/Documentation/Examples/WalPropertiesSet_mmfiles.generated deleted file mode 100644 index e01937d11c81..000000000000 --- a/Documentation/Examples/WalPropertiesSet_mmfiles.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> require("internal").wal.properties({ -........> allowOverSizeEntries: true, -........> logfileSize: 32 * 1024 * 1024 }); -{ -} diff --git a/Documentation/Examples/accessViaGeoIndex.generated b/Documentation/Examples/accessViaGeoIndex.generated deleted file mode 100644 index 821537312795..000000000000 --- a/Documentation/Examples/accessViaGeoIndex.generated +++ /dev/null @@ -1,30 +0,0 @@ -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.example.insert({ name : "Name/" + i + "/" + j, -........> home : [ i, j ], -........> work : [ -i, -j ] }); -........> } -........> } -........> -arangosh> db.example.ensureIndex({ type: "geo", fields: [ "home" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "home" - ], - "geoJson" : false, - "id" : "example/109553", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> items = db.example.getIndexes().map(function(x) { return x.id; }); -........> db.example.index(items[1]); -[ - "example/0", - "example/109553" -] diff --git a/Documentation/Examples/aqlArrayAppend_1.generated b/Documentation/Examples/aqlArrayAppend_1.generated deleted file mode 100644 index 1d0b3a9fc7dd..000000000000 --- a/Documentation/Examples/aqlArrayAppend_1.generated +++ /dev/null @@ -1,14 +0,0 @@ -@Q: -RETURN APPEND([ 1, 2, 3 ], [ 5, 6, 9 ]) - -@R -[ - [ - 1, - 2, - 3, - 5, - 6, - 9 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayAppend_2.generated b/Documentation/Examples/aqlArrayAppend_2.generated deleted file mode 100644 index c11a59e1db6b..000000000000 --- a/Documentation/Examples/aqlArrayAppend_2.generated +++ /dev/null @@ -1,14 +0,0 @@ -@Q: -RETURN APPEND([ 1, 2, 3 ], [ 3, 4, 5, 2, 9 ], true) - -@R -[ - [ - 1, - 2, - 3, - 4, - 5, - 9 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayCountDistinct_1.generated b/Documentation/Examples/aqlArrayCountDistinct_1.generated deleted file mode 100644 index c4664c474a25..000000000000 --- a/Documentation/Examples/aqlArrayCountDistinct_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN COUNT_DISTINCT([ 1, 2, 3 ]) - -@R -[ - 3 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayCountDistinct_2.generated b/Documentation/Examples/aqlArrayCountDistinct_2.generated deleted file mode 100644 index f42f0f42b3e1..000000000000 --- a/Documentation/Examples/aqlArrayCountDistinct_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN COUNT_DISTINCT([ "yes", "no", "yes", "sauron", "no", "yes" ]) - -@R -[ - 3 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayFirst_1.generated b/Documentation/Examples/aqlArrayFirst_1.generated deleted file mode 100644 index 2cad46a52627..000000000000 --- a/Documentation/Examples/aqlArrayFirst_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN FIRST([ 1, 2, 3 ]) - -@R -[ - 1 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayFirst_2.generated 
b/Documentation/Examples/aqlArrayFirst_2.generated deleted file mode 100644 index d448f81932de..000000000000 --- a/Documentation/Examples/aqlArrayFirst_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN FIRST([]) - -@R -[ - null -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayFlatten_1.generated b/Documentation/Examples/aqlArrayFlatten_1.generated deleted file mode 100644 index ad1c40bb4eb9..000000000000 --- a/Documentation/Examples/aqlArrayFlatten_1.generated +++ /dev/null @@ -1,20 +0,0 @@ -@Q: -RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ] ) - -@R -[ - [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - [ - 9, - 10 - ] - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayFlatten_2.generated b/Documentation/Examples/aqlArrayFlatten_2.generated deleted file mode 100644 index 525424a01df9..000000000000 --- a/Documentation/Examples/aqlArrayFlatten_2.generated +++ /dev/null @@ -1,18 +0,0 @@ -@Q: -RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ], 2 ) - -@R -[ - [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayIntersection_1.generated b/Documentation/Examples/aqlArrayIntersection_1.generated deleted file mode 100644 index b21b98a20884..000000000000 --- a/Documentation/Examples/aqlArrayIntersection_1.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN INTERSECTION( [1,2,3,4,5], [2,3,4,5,6], [3,4,5,6,7] ) - -@R -[ - [ - 5, - 4, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayIntersection_2.generated b/Documentation/Examples/aqlArrayIntersection_2.generated deleted file mode 100644 index 555f513617fe..000000000000 --- a/Documentation/Examples/aqlArrayIntersection_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN INTERSECTION( [2,4,6], [8,10,12], [14,16,18] ) - -@R -[ - [] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLast_1.generated b/Documentation/Examples/aqlArrayLast_1.generated deleted file mode 100644 index ee414f6af041..000000000000 --- a/Documentation/Examples/aqlArrayLast_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LAST( [1,2,3,4,5] ) - -@R -[ - 5 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLength_1.generated b/Documentation/Examples/aqlArrayLength_1.generated deleted file mode 100644 index 01ab7c2cf4ae..000000000000 --- a/Documentation/Examples/aqlArrayLength_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LENGTH( "🥑" ) - -@R -[ - 1 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLength_2.generated b/Documentation/Examples/aqlArrayLength_2.generated deleted file mode 100644 index 4401eb83061e..000000000000 --- a/Documentation/Examples/aqlArrayLength_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LENGTH( 1234 ) - -@R -[ - 4 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLength_3.generated b/Documentation/Examples/aqlArrayLength_3.generated deleted file mode 100644 index ffc7804a9f40..000000000000 --- a/Documentation/Examples/aqlArrayLength_3.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LENGTH( [1,2,3,4,5,6,7] ) - -@R -[ - 7 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLength_4.generated b/Documentation/Examples/aqlArrayLength_4.generated deleted file mode 100644 index ffc7804a9f40..000000000000 --- a/Documentation/Examples/aqlArrayLength_4.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LENGTH( 
[1,2,3,4,5,6,7] ) - -@R -[ - 7 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayLength_5.generated b/Documentation/Examples/aqlArrayLength_5.generated deleted file mode 100644 index cf9d3c088d18..000000000000 --- a/Documentation/Examples/aqlArrayLength_5.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN LENGTH( {a:1, b:2, c:3, d:4, e:{f:5,g:6}} ) - -@R -[ - 5 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayMinus_1.generated b/Documentation/Examples/aqlArrayMinus_1.generated deleted file mode 100644 index bba21309da42..000000000000 --- a/Documentation/Examples/aqlArrayMinus_1.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -RETURN MINUS( [1,2,3,4], [3,4,5,6], [5,6,7,8] ) - -@R -[ - [ - 2, - 1 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayNth_1.generated b/Documentation/Examples/aqlArrayNth_1.generated deleted file mode 100644 index f92d9f9904c5..000000000000 --- a/Documentation/Examples/aqlArrayNth_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN NTH( [ "foo", "bar", "baz" ], 2 ) - -@R -[ - "baz" -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayNth_2.generated b/Documentation/Examples/aqlArrayNth_2.generated deleted file mode 100644 index 826385649ebf..000000000000 --- a/Documentation/Examples/aqlArrayNth_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN NTH( [ "foo", "bar", "baz" ], 3 ) - -@R -[ - null -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayNth_3.generated b/Documentation/Examples/aqlArrayNth_3.generated deleted file mode 100644 index c349add8e115..000000000000 --- a/Documentation/Examples/aqlArrayNth_3.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN NTH( [ "foo", "bar", "baz" ], -1 ) - -@R -[ - null -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayOutersection_1.generated b/Documentation/Examples/aqlArrayOutersection_1.generated deleted file mode 100644 index 9dc0eb948dfd..000000000000 --- a/Documentation/Examples/aqlArrayOutersection_1.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -RETURN OUTERSECTION( [ 1, 2, 3 ], [ 2, 3, 4 ], [ 3, 4, 5 ] ) - -@R -[ - [ - 5, - 1 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPop_1.generated b/Documentation/Examples/aqlArrayPop_1.generated deleted file mode 100644 index c63ddb7b574b..000000000000 --- a/Documentation/Examples/aqlArrayPop_1.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN POP( [ 1, 2, 3, 4 ] ) - -@R -[ - [ - 1, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPop_2.generated b/Documentation/Examples/aqlArrayPop_2.generated deleted file mode 100644 index 3e8856a4c31e..000000000000 --- a/Documentation/Examples/aqlArrayPop_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN POP( [ 1 ] ) - -@R -[ - [] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPosition_1.generated b/Documentation/Examples/aqlArrayPosition_1.generated deleted file mode 100644 index 43b2c07af654..000000000000 --- a/Documentation/Examples/aqlArrayPosition_1.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN POSITION( [2,4,6,8], 4 ) - -@R -[ - true -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPosition_2.generated b/Documentation/Examples/aqlArrayPosition_2.generated deleted file mode 100644 index 032c095e01ff..000000000000 --- a/Documentation/Examples/aqlArrayPosition_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN POSITION( [2,4,6,8], 4, true ) 
- -@R -[ - 1 -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPush_1.generated b/Documentation/Examples/aqlArrayPush_1.generated deleted file mode 100644 index 3f5e74b7ab24..000000000000 --- a/Documentation/Examples/aqlArrayPush_1.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN PUSH([ 1, 2, 3 ], 4) - -@R -[ - [ - 1, - 2, - 3, - 4 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayPush_2.generated b/Documentation/Examples/aqlArrayPush_2.generated deleted file mode 100644 index 8705e29de173..000000000000 --- a/Documentation/Examples/aqlArrayPush_2.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN PUSH([ 1, 2, 2, 3 ], 2, true) - -@R -[ - [ - 1, - 2, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayRemoveNth_1.generated b/Documentation/Examples/aqlArrayRemoveNth_1.generated deleted file mode 100644 index 5cdcf41e2899..000000000000 --- a/Documentation/Examples/aqlArrayRemoveNth_1.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], 1 ) - -@R -[ - [ - "a", - "c", - "d", - "e" - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayRemoveNth_2.generated b/Documentation/Examples/aqlArrayRemoveNth_2.generated deleted file mode 100644 index 141bd84104c8..000000000000 --- a/Documentation/Examples/aqlArrayRemoveNth_2.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], -2 ) - -@R -[ - [ - "a", - "b", - "c", - "e" - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayRemoveValue_1.generated b/Documentation/Examples/aqlArrayRemoveValue_1.generated deleted file mode 100644 index d0190c56187e..000000000000 --- a/Documentation/Examples/aqlArrayRemoveValue_1.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a" ) - -@R -[ - [ - "b", - "b", - "c" - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayRemoveValue_2.generated b/Documentation/Examples/aqlArrayRemoveValue_2.generated deleted file mode 100644 index f318faa669a7..000000000000 --- a/Documentation/Examples/aqlArrayRemoveValue_2.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a", 1 ) - -@R -[ - [ - "b", - "b", - "a", - "c" - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayRemoveValues_1.generated b/Documentation/Examples/aqlArrayRemoveValues_1.generated deleted file mode 100644 index 66518a1683e1..000000000000 --- a/Documentation/Examples/aqlArrayRemoveValues_1.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN REMOVE_VALUES( [ "a", "a", "b", "c", "d", "e", "f" ], [ "a", "f", "d" ] ) - -@R -[ - [ - "b", - "c", - "e" - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayReverse_1.generated b/Documentation/Examples/aqlArrayReverse_1.generated deleted file mode 100644 index 9d4c17234b4a..000000000000 --- a/Documentation/Examples/aqlArrayReverse_1.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: -RETURN REVERSE ( [2,4,6,8,10] ) - -@R -[ - [ - 10, - 8, - 6, - 4, - 2 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayShift_1.generated b/Documentation/Examples/aqlArrayShift_1.generated deleted file mode 100644 index ca2603ba6642..000000000000 --- a/Documentation/Examples/aqlArrayShift_1.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN SHIFT( [ 1, 2, 3, 4 ] ) - -@R -[ - [ - 2, - 3, - 4 - ] -] \ No newline at end of file 
diff --git a/Documentation/Examples/aqlArrayShift_2.generated b/Documentation/Examples/aqlArrayShift_2.generated deleted file mode 100644 index dd442ae685a1..000000000000 --- a/Documentation/Examples/aqlArrayShift_2.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN SHIFT( [ 1 ] ) - -@R -[ - [] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_1.generated b/Documentation/Examples/aqlArraySlice_1.generated deleted file mode 100644 index 3fe0f3335105..000000000000 --- a/Documentation/Examples/aqlArraySlice_1.generated +++ /dev/null @@ -1,9 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, 1 ) - -@R -[ - [ - 1 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_2.generated b/Documentation/Examples/aqlArraySlice_2.generated deleted file mode 100644 index 9a5d7d85f226..000000000000 --- a/Documentation/Examples/aqlArraySlice_2.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, 2 ) - -@R -[ - [ - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_3.generated b/Documentation/Examples/aqlArraySlice_3.generated deleted file mode 100644 index 255231871224..000000000000 --- a/Documentation/Examples/aqlArraySlice_3.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 3 ) - -@R -[ - [ - 4, - 5 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_4.generated b/Documentation/Examples/aqlArraySlice_4.generated deleted file mode 100644 index 192532990fec..000000000000 --- a/Documentation/Examples/aqlArraySlice_4.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, -1 ) - -@R -[ - [ - 2, - 3, - 4 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_5.generated b/Documentation/Examples/aqlArraySlice_5.generated deleted file mode 100644 index 8404efc4fba1..000000000000 --- a/Documentation/Examples/aqlArraySlice_5.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, -2 ) - -@R -[ - [ - 1, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySlice_6.generated b/Documentation/Examples/aqlArraySlice_6.generated deleted file mode 100644 index c4250995afb3..000000000000 --- a/Documentation/Examples/aqlArraySlice_6.generated +++ /dev/null @@ -1,10 +0,0 @@ -@Q: -RETURN SLICE( [ 1, 2, 3, 4, 5 ], -3, 2 ) - -@R -[ - [ - 3, - 4 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySortedUnique_1.generated b/Documentation/Examples/aqlArraySortedUnique_1.generated deleted file mode 100644 index 3b2d89cf0b13..000000000000 --- a/Documentation/Examples/aqlArraySortedUnique_1.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: -RETURN SORTED_UNIQUE( [ 8,4,2,10,6,2,8,6,4 ] ) - -@R -[ - [ - 2, - 4, - 6, - 8, - 10 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArraySorted_1.generated b/Documentation/Examples/aqlArraySorted_1.generated deleted file mode 100644 index 43d1587de03a..000000000000 --- a/Documentation/Examples/aqlArraySorted_1.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: -RETURN SORTED( [ 8,4,2,10,6 ] ) - -@R -[ - [ - 2, - 4, - 6, - 8, - 10 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnionDistinct_1.generated b/Documentation/Examples/aqlArrayUnionDistinct_1.generated deleted file mode 100644 index ecd2f09a46bd..000000000000 --- a/Documentation/Examples/aqlArrayUnionDistinct_1.generated +++ /dev/null @@ -1,14 +0,0 @@ -@Q: 
-RETURN UNION_DISTINCT( - [ 1, 2, 3 ], - [ 1, 2 ] -) - -@R -[ - [ - 3, - 2, - 1 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnion_1.generated b/Documentation/Examples/aqlArrayUnion_1.generated deleted file mode 100644 index 63851d5abae0..000000000000 --- a/Documentation/Examples/aqlArrayUnion_1.generated +++ /dev/null @@ -1,16 +0,0 @@ -@Q: -RETURN UNION( - [ 1, 2, 3 ], - [ 1, 2 ] -) - -@R -[ - [ - 1, - 2, - 3, - 1, - 2 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnion_2.generated b/Documentation/Examples/aqlArrayUnion_2.generated deleted file mode 100644 index 17c22294f2fe..000000000000 --- a/Documentation/Examples/aqlArrayUnion_2.generated +++ /dev/null @@ -1,16 +0,0 @@ -@Q: -RETURN UNIQUE( - UNION( - [ 1, 2, 3 ], - [ 1, 2 ] - ) -) - -@R -[ - [ - 1, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnique_1.generated b/Documentation/Examples/aqlArrayUnique_1.generated deleted file mode 100644 index 42442303b15f..000000000000 --- a/Documentation/Examples/aqlArrayUnique_1.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: -RETURN UNIQUE( [ 1,2,2,3,3,3,4,4,4,4,5,5,5,5,5 ] ) - -@R -[ - [ - 1, - 2, - 3, - 4, - 5 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnshift_1.generated b/Documentation/Examples/aqlArrayUnshift_1.generated deleted file mode 100644 index a260fcc97849..000000000000 --- a/Documentation/Examples/aqlArrayUnshift_1.generated +++ /dev/null @@ -1,12 +0,0 @@ -@Q: -RETURN UNSHIFT( [ 1, 2, 3 ], 4 ) - -@R -[ - [ - 4, - 1, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlArrayUnshift_2.generated b/Documentation/Examples/aqlArrayUnshift_2.generated deleted file mode 100644 index 237227853ec1..000000000000 --- a/Documentation/Examples/aqlArrayUnshift_2.generated +++ /dev/null @@ -1,11 +0,0 @@ -@Q: -RETURN UNSHIFT( [ 1, 2, 3 ], 2, true ) - -@R -[ - [ - 1, - 2, - 3 - ] -] \ No newline at end of file diff --git a/Documentation/Examples/aqlDateIso8601.generated b/Documentation/Examples/aqlDateIso8601.generated deleted file mode 100644 index 875265633ca0..000000000000 --- a/Documentation/Examples/aqlDateIso8601.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN DATE_ISO8601('2017-02-03T18:23:48Z') - -@R -[ - "2017-02-03T18:23:48.000Z" -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoLineString_1.generated b/Documentation/Examples/aqlGeoLineString_1.generated deleted file mode 100644 index 822c5cd9328c..000000000000 --- a/Documentation/Examples/aqlGeoLineString_1.generated +++ /dev/null @@ -1,21 +0,0 @@ -@Q: -RETURN GEO_LINESTRING([ - [35, 10], [45, 45] -]) - -@R -[ - { - "type": "LineString", - "coordinates": [ - [ - 35, - 10 - ], - [ - 45, - 45 - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoMultiLineString_1.generated b/Documentation/Examples/aqlGeoMultiLineString_1.generated deleted file mode 100644 index dd86a94e2075..000000000000 --- a/Documentation/Examples/aqlGeoMultiLineString_1.generated +++ /dev/null @@ -1,34 +0,0 @@ -@Q: -RETURN GEO_MULTILINESTRING([ - [[100.0, 0.0], [101.0, 1.0]], - [[102.0, 2.0], [101.0, 2.3]] -]) - -@R -[ - { - "type": "MultiLineString", - "coordinates": [ - [ - [ - 100, - 0 - ], - [ - 101, - 1 - ] - ], - [ - [ - 102, - 2 - ], - [ - 101, - 2.3 - ] - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoMultiPoint_1.generated b/Documentation/Examples/aqlGeoMultiPoint_1.generated deleted file mode 100644 
index 87f9576c0438..000000000000 --- a/Documentation/Examples/aqlGeoMultiPoint_1.generated +++ /dev/null @@ -1,21 +0,0 @@ -@Q: -RETURN GEO_MULTIPOINT([ - [35, 10], [45, 45] -]) - -@R -[ - { - "type": "MultiPoint", - "coordinates": [ - [ - 35, - 10 - ], - [ - 45, - 45 - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoMultiPolygon_1.generated b/Documentation/Examples/aqlGeoMultiPolygon_1.generated deleted file mode 100644 index b02ce02d2679..000000000000 --- a/Documentation/Examples/aqlGeoMultiPolygon_1.generated +++ /dev/null @@ -1,85 +0,0 @@ -@Q: -RETURN GEO_MULTIPOLYGON([ - [ - [[40, 40], [20, 45], [45, 30], [40, 40]] - ], - [ - [[20, 35], [10, 30], [10, 10], [30, 5], [45, 20], [20, 35]], - [[30, 20], [20, 15], [20, 25], [30, 20]] - ] -]) - -@R -[ - { - "type": "MultiPolygon", - "coordinates": [ - [ - [ - [ - 40, - 40 - ], - [ - 20, - 45 - ], - [ - 45, - 30 - ], - [ - 40, - 40 - ] - ] - ], - [ - [ - [ - 20, - 35 - ], - [ - 10, - 30 - ], - [ - 10, - 10 - ], - [ - 30, - 5 - ], - [ - 45, - 20 - ], - [ - 20, - 35 - ] - ], - [ - [ - 30, - 20 - ], - [ - 20, - 15 - ], - [ - 20, - 25 - ], - [ - 30, - 20 - ] - ] - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoPoint_1.generated b/Documentation/Examples/aqlGeoPoint_1.generated deleted file mode 100644 index bbb9561f38b7..000000000000 --- a/Documentation/Examples/aqlGeoPoint_1.generated +++ /dev/null @@ -1,13 +0,0 @@ -@Q: -RETURN GEO_POINT(1.0, 2.0) - -@R -[ - { - "type": "Point", - "coordinates": [ - 1, - 2 - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoPolygon_1.generated b/Documentation/Examples/aqlGeoPolygon_1.generated deleted file mode 100644 index 12cc78a83dde..000000000000 --- a/Documentation/Examples/aqlGeoPolygon_1.generated +++ /dev/null @@ -1,27 +0,0 @@ -@Q: -RETURN GEO_POLYGON([ - [0.0, 0.0], [7.5, 2.5], [0.0, 5.0] -]) - -@R -[ - { - "type": "Polygon", - "coordinates": [ - [ - [ - 0, - 0 - ], - [ - 7.5, - 2.5 - ], - [ - 0, - 5 - ] - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/aqlGeoPolygon_2.generated b/Documentation/Examples/aqlGeoPolygon_2.generated deleted file mode 100644 index 2e92ea076f99..000000000000 --- a/Documentation/Examples/aqlGeoPolygon_2.generated +++ /dev/null @@ -1,54 +0,0 @@ -@Q: -RETURN GEO_POLYGON([ - [[35, 10], [45, 45], [15, 40], [10, 20], [35, 10]], - [[20, 30], [35, 35], [30, 20], [20, 30]] -]) - -@R -[ - { - "type": "Polygon", - "coordinates": [ - [ - [ - 35, - 10 - ], - [ - 45, - 45 - ], - [ - 15, - 40 - ], - [ - 10, - 20 - ], - [ - 35, - 10 - ] - ], - [ - [ - 20, - 30 - ], - [ - 35, - 35 - ], - [ - 30, - 20 - ], - [ - 20, - 30 - ] - ] - ] - } -] \ No newline at end of file diff --git a/Documentation/Examples/arangobench.json b/Documentation/Examples/arangobench.json deleted file mode 100644 index b17737267d65..000000000000 --- a/Documentation/Examples/arangobench.json +++ /dev/null @@ -1,842 +0,0 @@ -{ - "async" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "send asynchronous requests", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "batch-size" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "number of operations in one batch (0 disables batching)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - 
"obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "collection" : { - "category" : "option", - "default" : "ArangoBenchmark", - "deprecatedIn" : null, - "description" : "collection name to use in tests (if they involve collections)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "complexity" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "complexity parameter for the test (meaning depends on test case)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "concurrency" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "number of parallel threads and connections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "delay" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use a startup delay (necessary only when run in series)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "junit-report-file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "filename to write junit style report to", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "keep-alive" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use HTTP keep-alive", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "number-of-shards" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "number of shards of created collections (cluster only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "progress" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log intermediate progress", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "quiet" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "suppress status messages", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "replication-factor" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "replication factor of created collections (cluster only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "requests" : { - "category" : "option", - "default" : 1000, - "deprecatedIn" : null, - "description" : "total number of operations", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "runs" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "run test n times (and calculate statistics based on median)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" 
: false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "test-case" : { - "category" : "option", - "default" : "version", - 
"deprecatedIn" : null, - "description" : "test case to use", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string", - "values" : "Possible values: \"aqlinsert\", \"aqltrx\", \"aqlv8\", \"collection\", \"counttrx\", \"crud\", \"crud-append\", \"crud-write-read\", \"document\", \"edge\", \"hash\", \"import-document\", \"multi-collection\", \"multitrx\", \"random-shapes\", \"shapes\", \"shapes-append\", \"skiplist\", \"stream-cursor\", \"version\"" - }, - "verbose" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "print out replies if the HTTP header indicates DB errors", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "wait-for-sync" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use waitForSync for created collections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git a/Documentation/Examples/arangod.json b/Documentation/Examples/arangod.json deleted file mode 100644 index 8a58cee09b5c..000000000000 --- a/Documentation/Examples/arangod.json +++ /dev/null @@ -1,3908 +0,0 @@ -{ - "agency.activate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Activate agency", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "agency", - "type" : "boolean" - }, - "agency.compaction-keep-size" : { - "category" : "option", - "default" : 50000, - "deprecatedIn" : null, - "description" : "keep as many indices before compaction point", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "uint64" - }, - "agency.compaction-step-size" : { - "category" : "option", - "default" : 1000, - "deprecatedIn" : null, - "description" : "step size between state machine compactions", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "uint64" - }, - "agency.disaster-recovery-id" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "allows for specification of the id for this agent; dangerous option for disaster recover only!", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "string" - }, - "agency.election-timeout-max" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "maximum timeout before an agent calls for new election [s]", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" 
: null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "double" - }, - "agency.election-timeout-min" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "minimum timeout before an agent calls for new election [s]", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "double" - }, - "agency.endpoint" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "agency endpoints", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "string..." - }, - "agency.max-append-size" : { - "category" : "option", - "default" : 250, - "deprecatedIn" : null, - "description" : "maximum size of appendEntries document (# log entries)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "uint64" - }, - "agency.my-address" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "which address to advertise to the outside", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "string" - }, - "agency.pool-size" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "number of agent pool", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "uint64" - }, - "agency.size" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "number of agents", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "uint64" - }, - "agency.supervision" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "perform arangodb cluster supervision", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "agency", - "type" : "boolean" - }, - "agency.supervision-frequency" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "arangodb cluster supervision frequency [s]", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "double" - }, - "agency.supervision-grace-period" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "supervision time, after which a server is considered to have failed [s]", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "agency", - "type" : "double" - }, - "agency.wait-for-sync" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "wait for hard disk syncs on every persistence call (required in production)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - 
"obsolete" : false, - "requiresValue" : false, - "section" : "agency", - "type" : "boolean" - }, - "arangosearch.threads" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "the exact number of threads to use for asynchronous tasks (0 == autodetect)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "arangosearch", - "type" : "uint64" - }, - "arangosearch.threads-limit" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "upper limit to the autodetected number of threads to use for asynchronous tasks (0 == use default)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "arangosearch", - "type" : "uint64" - }, - "audit.hostname" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "hostname to use", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "audit", - "type" : "string" - }, - "audit.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "audit destination(s)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "audit", - "type" : "string..." - }, - "cache.rebalancing-interval" : { - "category" : "option", - "default" : 2000000, - "deprecatedIn" : null, - "description" : "microseconds between rebalancing attempts", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cache", - "type" : "uint64" - }, - "cache.size" : { - "category" : "option", - "default" : 3637388288, - "deprecatedIn" : null, - "description" : "size of cache in bytes", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cache", - "type" : "uint64" - }, - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "cluster.agency-endpoint" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "agency endpoint to connect to", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "string..." 
- }, - "cluster.agency-prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "agency prefix", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "string" - }, - "cluster.create-waits-for-sync-replication" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "active coordinator will wait for all replicas to create collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "cluster", - "type" : "boolean" - }, - "cluster.index-create-timeout" : { - "category" : "option", - "default" : 3600, - "deprecatedIn" : null, - "description" : "amount of time (in seconds) the coordinator will wait for an index to be created before giving up", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "double" - }, - "cluster.my-address" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "this server's endpoint (cluster internal)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "string" - }, - "cluster.my-advertised-endpoint" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "this server's advertised endpoint (e.g. external IP address or load balancer, optional)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "string" - }, - "cluster.my-role" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "this server's role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "string" - }, - "cluster.require-persisted-id" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if set to true, then the instance will only start if a UUID file is found in the database on startup. Setting this option will make sure the instance is started using an already existing database directory and not a new one. 
For the first start, the UUID file must either be created manually or the option must be set to false for the initial startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "cluster", - "type" : "boolean" - }, - "cluster.synchronous-replication-timeout-factor" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "all synchronous replication timeouts are multiplied by this factor", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "double" - }, - "cluster.synchronous-replication-timeout-per-4k" : { - "category" : "option", - "default" : 0.1, - "deprecatedIn" : null, - "description" : "all synchronous replication timeouts are increased by this amount per 4096 bytes (in seconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "double" - }, - "cluster.system-replication-factor" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "replication factor for system collections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "cluster", - "type" : "uint32" - }, - "compaction.db-sleep-time" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "sleep interval between two compaction runs (in s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "double" - }, - "compaction.dead-documents-threshold" : { - "category" : "option", - "default" : 16384, - "deprecatedIn" : null, - "description" : "minimum unused count of documents in a datafile", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "compaction.dead-size-percent-threshold" : { - "category" : "option", - "default" : 0.1, - "deprecatedIn" : null, - "description" : "how many percent of the source datafile should be unused at least", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "double" - }, - "compaction.dead-size-threshold" : { - "category" : "option", - "default" : 131072, - "deprecatedIn" : null, - "description" : "how many bytes of the source data file are allowed to be unused at most", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "compaction.max-file-size-factor" : { - "category" : "option", - "default" : 3, - "deprecatedIn" : null, - "description" : "how large the resulting file may be in comparison to the collections '--database.maximal-journal-size' setting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "compaction.max-files" : { - "category" : "option", - 
"default" : 3, - "deprecatedIn" : null, - "description" : "Maximum number of files to merge to one file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "compaction.max-result-file-size" : { - "category" : "option", - "default" : 134217728, - "deprecatedIn" : null, - "description" : "how large may the compaction result file become (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "compaction.min-interval" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "minimum sleep time between two compaction runs (in s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "double" - }, - "compaction.min-small-data-file-size" : { - "category" : "option", - "default" : 131072, - "deprecatedIn" : null, - "description" : "minimal filesize threshhold original data files have to be below for a compaction", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "compaction", - "type" : "uint64" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "console" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "start a JavaScript emergency console", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "daemon" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "background the server, running it as daemon", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "database.auto-upgrade" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "perform a database upgrade if necessary", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.check-version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "checks the versions of the database and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.directory" : { - "category" 
: "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path to the database directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "database", - "type" : "string" - }, - "database.force-sync-properties" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "force syncing of collection properties to disk, will use waitForSync value of collection when turned off", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.ignore-datafile-errors" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "load collections even if datafiles may contain errors", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.init-database" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "initializes an empty database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.maximal-journal-size" : { - "category" : "option", - "default" : 33554432, - "deprecatedIn" : null, - "description" : "default maximal journal size, can be overwritten when creating a collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "database", - "type" : "uint64" - }, - "database.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "initial password of root user", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "database", - "type" : "string" - }, - "database.required-directory-state" : { - "category" : "option", - "default" : "any", - "deprecatedIn" : null, - "description" : "required state of database directory at startup (non-existing: database directory must not exist, existing: database directory must exist, empty: database directory must exist but be empty, populated: database directory must exist and contain specific files already, any: any state allowed)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "database", - "type" : "string", - "values" : "Possible values: \"any\", \"empty\", \"existing\", \"non-existing\", \"populated\"" - }, - "database.restore-admin" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "resets the admin users and sets a new password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.throw-collection-not-loaded-error" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "throw an error when accessing a collection that is still loading", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, 
- "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.upgrade-check" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "skip a database upgrade", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "database.wait-for-sync" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "default wait-for-sync behavior, can be overwritten when creating a collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "database", - "type" : "boolean" - }, - "default-language" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ISO-639 language code", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "fortune" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show fortune cookie on startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "foxx.queues" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable Foxx queues", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "foxx", - "type" : "boolean" - }, - "foxx.queues-poll-interval" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "poll interval (in seconds) for Foxx queue manager", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "foxx", - "type" : "double" - }, - "frontend.proxy-request-check" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable proxy request checking", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "frontend", - "type" : "boolean" - }, - 
"frontend.trusted-proxy" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "list of proxies to trust (may be IP or network). Make sure --frontend.proxy-request-check is enabled", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "frontend", - "type" : "string..." - }, - "frontend.version-check" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "alert the user if new versions are available", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "frontend", - "type" : "boolean" - }, - "gid" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "switch to group-id after reading config files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "http.allow-method-override" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "allow HTTP method override using special headers", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "http", - "type" : "boolean" - }, - "http.hide-product-header" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not expose \"Server: ArangoDB\" header in HTTP responses", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "http", - "type" : "boolean" - }, - "http.keep-alive-timeout" : { - "category" : "option", - "default" : 300, - "deprecatedIn" : null, - "description" : "keep-alive timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "http", - "type" : "double" - }, - "http.trusted-origin" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "trusted origin URLs for CORS requests with credentials", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "http", - "type" : "string..." 
- }, - "hund" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "make ArangoDB bark on startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "javascript.allow-admin-execute" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "for testing purposes allow '_admin/execute', NEVER enable on production", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.app-path" : { - "category" : "option", - "default" : "./js/apps", - "deprecatedIn" : null, - "description" : "directory for Foxx applications", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.copy-installation" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "copy contents of 'javascript.startup-directory' on first start", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.enabled" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable the V8 JavaScript engine", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.gc-frequency" : { - "category" : "option", - "default" : 60, - "deprecatedIn" : null, - "description" : "JavaScript time-based garbage collection frequency (each x seconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "double" - }, - "javascript.gc-interval" : { - "category" : "option", - "default" : 2000, - "deprecatedIn" : null, - "description" : "JavaScript request-based garbage collection interval (each x requests)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.module-directory" : { - "category" : "option", - "default" : [ - "./enterprise/js" - ], - "deprecatedIn" : null, - "description" : "additional paths containing JavaScript modules", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.script" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "run scripts and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." 
- }, - "javascript.script-parameter" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "script parameter", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.startup-directory" : { - "category" : "option", - "default" : "./js", - "deprecatedIn" : null, - "description" : "path to the directory containing JavaScript startup scripts", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.v8-contexts" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "maximum number of V8 contexts that are created for executing JavaScript actions", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-contexts-max-age" : { - "category" : "option", - "default" : 60, - "deprecatedIn" : null, - "description" : "maximum age for each V8 context (in seconds) before it is disposed", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "double" - }, - "javascript.v8-contexts-max-invocations" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "maximum number of invocations for each V8 context before it is disposed", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-contexts-minimum" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "minimum number of V8 contexts that keep available for executing JavaScript actions", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-max-heap" : { - "category" : "option", - "default" : 3072, - "deprecatedIn" : null, - "description" : "maximal heap size (in MB)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-options" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "options to pass to v8", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "ldap.async-connect" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Whether or not the connection to the LDAP library will be done asynchronously", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.basedn" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap basedn, eg. 
dc=example,dc=com", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.binddn" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap binddn, eg. cn=admin,dc=example,dc=com", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.bindpasswd" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap bindpassword, eg. admin", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.debug" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Turn on internal OpenLDAP library output (warning: will print to stdout)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.enabled" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "enable LDAP", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.network-timeout" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "Timeout value (in seconds) after which network operations following the initial connection return in case of no activity (a value of 0 means default timeout)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "double" - }, - "ldap.port" : { - "category" : "option", - "default" : 389, - "deprecatedIn" : null, - "description" : "port to use", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "uint16" - }, - "ldap.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap prefix, eg. 
uid= xor dn= xor cn=", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.referrals" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Whether or not the LDAP library should implicitly chase referrals", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.refresh-rate" : { - "category" : "option", - "default" : 300, - "deprecatedIn" : null, - "description" : "Refresh user settings after this time (in seconds)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "double" - }, - "ldap.restart" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Whether or not the LDAP library should implicitly restart connections", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.retries" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "Number of tries to attempt connecting to the LDAP server. Setting it to values greater than one will retry connecting in case the LDAP server is unavailable or denies the connection", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "uint32" - }, - "ldap.roles-attribute-name" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap attributename where the role are located.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.roles-exclude" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "Regexp to exclude groups. Leave empty to exclude none.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.roles-include" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "Regexp to include groups. Leave empty to include all.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.roles-search" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap search for roles; '{USER}' is replaced by the 'dn' of the user.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.roles-transformation" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "Regexp to normalizer role name, e.g. 
'/^ *(.*[^ ]])*/$2/'", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string..." - }, - "ldap.search-attribute" : { - "category" : "option", - "default" : "uid", - "deprecatedIn" : null, - "description" : "ldap search attribute, eg. uid", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.search-filter" : { - "category" : "option", - "default" : "objectClass=*", - "deprecatedIn" : null, - "description" : "ldap search filter, eg. (objectClass=simpleSecurityObject)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.search-scope" : { - "category" : "option", - "default" : "sub", - "deprecatedIn" : null, - "description" : "ldap search scope, one of base, one, sub", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.serialize-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "Maximum amount of time (in seconds) that will be waited for the serialization mutex", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "double" - }, - "ldap.serialized" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Whether or not calls into the LDAP library should be serialized. This option can be used to work around thread-unsafe LDAP library functionality", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.server" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "server to use", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.suffix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap suffix, eg. 
,dc=example,dc=com", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.superuser-role" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "Role mapping to the super-users", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.timeout" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "Timeout value (in seconds) for synchronous LDAP API calls (a value of 0 means default timeout)", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "double" - }, - "ldap.tls" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "enable TLS", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ldap", - "type" : "boolean" - }, - "ldap.tls-cacert-dir" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap tls cacert dir", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.tls-cacert-file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap tls cacert file", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.tls-cert-check-strategy" : { - "category" : "option", - "default" : "hard", - "deprecatedIn" : null, - "description" : "ldap tls cert check strategy, one of never, hard, demand, allow, try", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.tls-version" : { - "category" : "option", - "default" : "1.2", - "deprecatedIn" : null, - "description" : "ldap tls version, one of 1.0, 1.1, 1.2", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "ldap.url" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ldap url, eg. ldap://example.com:389/dc=example,dc=com?uid?sub", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ldap", - "type" : "string" - }, - "log" : { - "category" : "option", - "default" : [ - "info", - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info", - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "nonce.size" : { - "category" : "option", - "default" : 4194304, - "deprecatedIn" : null, - "description" : "the size of the hash array for nonces", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "nonce", - "type" : "uint64" - }, - "pid-file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "pid-file in daemon mode", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "query.cache-entries" : { - "category" : "option", - "default" : 128, - "deprecatedIn" : null, - "description" : "maximum number of results in query result cache per database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "uint64" - }, - "query.cache-entries-max-size" : { - "category" : "option", - "default" : 268435456, - "deprecatedIn" : null, - "description" : "maximum cumulated size of results in query result cache per database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "uint64" - }, - "query.cache-entry-max-size" : { - "category" : "option", - "default" : 16777216, - "deprecatedIn" : null, - "description" : "maximum size of an invidiual result entry in query result cache", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "uint64" - }, - "query.cache-include-system-collections" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "whether or not to include system collection queries in the query result cache", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "query", - "type" : "boolean" - }, - "query.cache-mode" : { - "category" : "option", - "default" : "off", - "deprecatedIn" : null, - "description" : "mode for the AQL query result cache (on, off, demand)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "string" - }, - "query.fail-on-warning" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "whether AQL queries should fail with errors even for recoverable warnings", - "dynamic" : 
false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "query", - "type" : "boolean" - }, - "query.memory-limit" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "memory threshold for AQL queries (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "uint64" - }, - "query.optimizer-max-plans" : { - "category" : "option", - "default" : 128, - "deprecatedIn" : null, - "description" : "maximum number of query plans to create for a query", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "uint64" - }, - "query.registry-ttl" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "default time-to-live of cursors and query snippets (in seconds); if <= 0, value will default to 30 for single-server instances or 600 for cluster instances", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "double" - }, - "query.slow-streaming-threshold" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "threshold for slow streaming AQL queries (in seconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "double" - }, - "query.slow-threshold" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "threshold for slow AQL queries (in seconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "query", - "type" : "double" - }, - "query.smart-joins" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable smart joins query optimization", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : true, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "query", - "type" : "boolean" - }, - "query.tracking" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "whether to track slow AQL queries", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "query", - "type" : "boolean" - }, - "query.tracking-with-bindvars" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "whether to track bind vars with AQL queries", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "query", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : 
"random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "replication.active-failover" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Enable active-failover during asynchronous replication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "replication", - "type" : "boolean" - }, - "replication.auto-start" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "switch to enable or disable the automatic start of replication appliers", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "replication", - "type" : "boolean" - }, - "replication.automatic-failover" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "Please use `--replication.active-failover` instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "replication", - "type" : "boolean" - }, - "rocksdb.allow-fallocate" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "if true, allow RocksDB to use fallocate calls. if false, fallocate calls are bypassed", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.block-align-data-blocks" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if true, aligns data blocks on lesser of page size and block size", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.block-cache-shard-bits" : { - "category" : "option", - "default" : -1, - "deprecatedIn" : null, - "description" : "number of shard bits to use for block cache (use -1 for default value)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int64" - }, - "rocksdb.block-cache-size" : { - "category" : "option", - "default" : 4364865945, - "deprecatedIn" : null, - "description" : "size of block cache in bytes", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.compaction-read-ahead-size" : { - "category" : "option", - "default" : 2097152, - "deprecatedIn" : null, - "description" : "if non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. 
that way RocksDB's compaction is doing sequential instead of random reads.", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.debug-logging" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "true to enable rocksdb debug logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.delayed-write-rate" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "limited write rate to DB (in bytes per second) if we are writing to the last mem-table allowed and we allow more than 3 mem-tables, or if we have surpassed a certain number of level-0 files and need to slowdown writes", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.dynamic-level-bytes" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "if true, determine the number of bytes for each level dynamically to minimize space amplification", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.enable-pipelined-write" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if true, use a two stage write queue for WAL writes and memtable writes", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.enable-statistics" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "whether or not RocksDB statistics should be turned on", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.encryption-key-generator" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "program providing the encryption key on stdout. If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "string" - }, - "rocksdb.encryption-keyfile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file containing encryption key. 
If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "string" - }, - "rocksdb.enforce-block-cache-size-limit" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if true, strictly enforces the block cache size limit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.intermediate-commit-count" : { - "category" : "option", - "default" : 1000000, - "deprecatedIn" : null, - "description" : "an intermediate commit will be performed automatically when this number of operations is reached in a transaction", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.intermediate-commit-size" : { - "category" : "option", - "default" : 536870912, - "deprecatedIn" : null, - "description" : "an intermediate commit will be performed automatically when a transaction has accumulated operations of this size (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.level0-compaction-trigger" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "number of level-0 files that triggers a compaction", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int64" - }, - "rocksdb.level0-slowdown-trigger" : { - "category" : "option", - "default" : 20, - "deprecatedIn" : null, - "description" : "number of level-0 files that triggers a write slowdown", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int64" - }, - "rocksdb.level0-stop-trigger" : { - "category" : "option", - "default" : 36, - "deprecatedIn" : null, - "description" : "number of level-0 files that triggers a full write stall", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int64" - }, - "rocksdb.limit-open-files-at-startup" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "limit the amount of .sst files RocksDB will inspect at startup, in order to startup reduce IO", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.max-background-jobs" : { - "category" : "option", - "default" : 8, - "deprecatedIn" : null, - "description" : "Maximum number of concurrent background jobs (compactions and flushes)", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int32" - }, - "rocksdb.max-bytes-for-level-base" : { - "category" : "option", - "default" : 268435456, 
- "deprecatedIn" : null, - "description" : "if not using dynamic level sizes, this controls the maximum total data size for level-1", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.max-bytes-for-level-multiplier" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "if not using dynamic level sizes, the maximum number of bytes for level L can be calculated as max-bytes-for-level-base * (max-bytes-for-level-multiplier ^ (L-1))", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "double" - }, - "rocksdb.max-subcompactions" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "maximum number of concurrent subjobs for a background compaction", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.max-total-wal-size" : { - "category" : "option", - "default" : 83886080, - "deprecatedIn" : null, - "description" : "maximum total size of WAL files that will force flush stale column families", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.max-transaction-size" : { - "category" : "option", - "default" : 18446744073709551615, - "deprecatedIn" : null, - "description" : "transaction size limit (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.max-write-buffer-number" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "maximum number of write buffers that built up in memory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.min-write-buffer-number-to-merge" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "minimum number of write buffers that will be merged together before writing to storage", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.num-levels" : { - "category" : "option", - "default" : 7, - "deprecatedIn" : null, - "description" : "number of levels for the database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.num-threads-priority-high" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "number of threads for high priority operations (e.g. 
flush)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint32" - }, - "rocksdb.num-threads-priority-low" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "number of threads for low priority operations (e.g. compaction)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint32" - }, - "rocksdb.num-uncompressed-levels" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "number of uncompressed levels for the database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.optimize-filters-for-hits" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "this flag specifies that the implementation should optimize the filters mainly for cases where keys are found rather than also optimize for keys missed. This would be used in cases where the application knows that there are very few misses or the performance in the case of misses is not important", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.recycle-log-file-num" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if true, keep a pool of log files around for recycling", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.sync-interval" : { - "category" : "option", - "default" : 100, - "deprecatedIn" : null, - "description" : "interval for automatic, non-requested disk syncs (in milliseconds, use 0 to turn automatic syncing off)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.table-block-size" : { - "category" : "option", - "default" : 16384, - "deprecatedIn" : null, - "description" : "approximate size (in bytes) of user data packed per block", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.throttle" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable write-throttling", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.total-write-buffer-size" : { - "category" : "option", - "default" : 5819821260, - "deprecatedIn" : null, - "description" : "maximum total size of in-memory write buffers (0 = unbounded)", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.transaction-lock-timeout" : { - "category" : "option", - "default" : 
1000, - "deprecatedIn" : null, - "description" : "If positive, specifies the wait timeout in milliseconds when a transaction attempts to lock a document. A negative value is not recommended as it can lead to deadlocks (0 = no waiting, < 0 no timeout)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "int64" - }, - "rocksdb.use-direct-io-for-flush-and-compaction" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use O_DIRECT for flush and compaction", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.use-direct-reads" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use O_DIRECT for reading files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.use-file-logging" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use a file-base logger for RocksDB's own logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.use-fsync" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "issue an fsync when writing to disk (set to true for issuing fdatasync only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.wal-archive-size-limit" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "maximum total size (in bytes) of archived WAL files (0 = unlimited)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "rocksdb.wal-directory" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "optional path to the RocksDB WAL directory. 
If not set, the WAL directory will be located inside the regular data directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "string" - }, - "rocksdb.wal-file-timeout" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "timeout after which unused WAL files are deleted", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "double" - }, - "rocksdb.wal-file-timeout-initial" : { - "category" : "option", - "default" : 180, - "deprecatedIn" : null, - "description" : "initial timeout after which unused WAL files deletion kicks in after server start", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "double" - }, - "rocksdb.wal-recovery-skip-corrupted" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "skip corrupted records in WAL recovery", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "rocksdb", - "type" : "boolean" - }, - "rocksdb.write-buffer-size" : { - "category" : "option", - "default" : 67108864, - "deprecatedIn" : null, - "description" : "amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "rocksdb", - "type" : "uint64" - }, - "server.allow-use-database" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "allow change of database in REST actions, only needed for unittests", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.authentication" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable authentication for ALL client requests", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.authentication-system-only" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use HTTP authentication only for requests to /_api and /_admin", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.authentication-timeout" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "timeout for the authentication cache in seconds (0 = indefinitely)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.authentication-unix-sockets" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "authentication for requests via UNIX domain sockets", - "dynamic" : false, - 
"enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.check-max-memory-mappings" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "check the maximum number of memory mappings at runtime", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.descriptors-minimum" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "minimum number of file descriptors needed to start", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.endpoint" : { - "category" : "option", - "default" : [ - "tcp://0.0.0.0:8529" - ], - "deprecatedIn" : null, - "description" : "endpoint for client requests (e.g. 'http+tcp://127.0.0.1:8529', or 'vst+ssl://192.168.1.1:8529')", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string..." - }, - "server.flush-interval" : { - "category" : "option", - "default" : 1000000, - "deprecatedIn" : null, - "description" : "interval (in microseconds) for flushing data", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.gid" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "switch to group-id after reading config files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.io-threads" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "Number of threads used to handle IO", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.jwt-secret" : { - "category" : "option", - "default" : "", - "deprecatedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "description" : "secret to use when doing jwt authentication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.jwt-secret-keyfile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file containing jwt secret to use when doing jwt authentication.", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.local-authentication" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable authentication using the local user database", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.maintenance-actions-block" : { 
- "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "minimum number of seconds finished Actions block duplicates", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "int32" - }, - "server.maintenance-actions-linger" : { - "category" : "option", - "default" : 3600, - "deprecatedIn" : null, - "description" : "minimum number of seconds finished Actions remain in deque", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "int32" - }, - "server.maintenance-threads" : { - "category" : "option", - "default" : 3, - "deprecatedIn" : null, - "description" : "maximum number of threads available for maintenance actions", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint32" - }, - "server.maximal-queue-size" : { - "category" : "option", - "default" : 4096, - "deprecatedIn" : null, - "description" : "size of the priority 2 fifo", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.maximal-threads" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "maximum number of request handling threads to run (0 = use system-specific default of 64)", - "dynamic" : true, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.minimal-threads" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "minimum number of request handling threads to run", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.prio1-size" : { - "category" : "option", - "default" : 1048576, - "deprecatedIn" : null, - "description" : "size of the priority 1 fifo", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.rest-server" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "start a rest-server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.scheduler-queue-size" : { - "category" : "option", - "default" : 128, - "deprecatedIn" : null, - "description" : "number of simultaneously queued requests inside the scheduler", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.statistics" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "turn statistics gathering on or off", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : 
"server", - "type" : "boolean" - }, - "server.storage-engine" : { - "category" : "option", - "default" : "auto", - "deprecatedIn" : null, - "description" : "storage engine type", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string", - "values" : "Possible values: \"auto\", \"mmfiles\", \"rocksdb\"" - }, - "server.uid" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "switch to user-id after reading config files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.cafile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ca file used for secure connections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "string" - }, - "ssl.cipher-list" : { - "category" : "option", - "default" : "HIGH:!EXPORT:!aNULL@STRENGTH", - "deprecatedIn" : null, - "description" : "ssl ciphers to use, see OpenSSL documentation", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "string" - }, - "ssl.ecdh-curve" : { - "category" : "option", - "default" : "prime256v1", - "deprecatedIn" : null, - "description" : "SSL ECDH Curve, see the output of \"openssl ecparam -list_curves\"", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "string" - }, - "ssl.keyfile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "key-file used for secure connections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "string" - }, - "ssl.options" : { - "category" : "option", - "default" : 2147485780, - "deprecatedIn" : null, - "description" : "ssl connection options, see OpenSSL documentation", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "ssl.require-peer-certificate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require a peer certificate when connecting", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ssl", - "type" : "boolean" - }, - "ssl.session-cache" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "enable the session cache for connections", - "dynamic" : false, - 
"enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ssl", - "type" : "boolean" - }, - "supervisor" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "background the server, starts a supervisor", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "tcp.backlog-size" : { - "category" : "option", - "default" : 64, - "deprecatedIn" : null, - "description" : "listen backlog size", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "tcp", - "type" : "uint64" - }, - "tcp.reuse-address" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "try to reuse TCP port(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "tcp", - "type" : "boolean" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "ttl.frequency" : { - "category" : "option", - "default" : 30000, - "deprecatedIn" : null, - "description" : "frequency (in milliseconds) for the TTL background thread invocation. a value of 0 turns the TTL background thread off entirely", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ttl", - "type" : "uint64" - }, - "ttl.max-collection-removes" : { - "category" : "option", - "default" : 1000000, - "deprecatedIn" : null, - "description" : "maximum number of documents to remove per collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ttl", - "type" : "uint64" - }, - "ttl.max-total-removes" : { - "category" : "option", - "default" : 1000000, - "deprecatedIn" : null, - "description" : "maximum number of documents to remove per invocation of the TTL thread", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ttl", - "type" : "uint64" - }, - "ttl.only-loaded-collection" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "only consider already loaded collections for removal", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "ttl", - "type" : "boolean" - }, - "uid" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "switch to user-id after reading config files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - 
"hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "vst.maxsize" : { - "category" : "option", - "default" : 30720, - "deprecatedIn" : null, - "description" : "maximal size (in bytes) for a VelocyPack chunk", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "vst", - "type" : "uint32" - }, - "wal.allow-oversize-entries" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "allow entries that are bigger than '--wal.logfile-size'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "wal", - "type" : "boolean" - }, - "wal.directory" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "logfile directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "string" - }, - "wal.flush-timeout" : { - "category" : "option", - "default" : 15000, - "deprecatedIn" : null, - "description" : "flush timeout (in milliseconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint64" - }, - "wal.historic-logfiles" : { - "category" : "option", - "default" : 10, - "deprecatedIn" : null, - "description" : "maximum number of historic logfiles to keep after collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint32" - }, - "wal.ignore-logfile-errors" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "ignore logfile errors. 
this will read recoverable data from corrupted logfiles but ignore any unrecoverable data", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "wal", - "type" : "boolean" - }, - "wal.ignore-recovery-errors" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "continue recovery even if re-applying operations fails", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "wal", - "type" : "boolean" - }, - "wal.logfile-size" : { - "category" : "option", - "default" : 33554432, - "deprecatedIn" : null, - "description" : "size of each logfile (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint32" - }, - "wal.open-logfiles" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "maximum number of parallel open logfiles", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint32" - }, - "wal.reserve-logfiles" : { - "category" : "option", - "default" : 3, - "deprecatedIn" : null, - "description" : "maximum number of reserve logfiles to maintain", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint32" - }, - "wal.slots" : { - "category" : "option", - "default" : 1048576, - "deprecatedIn" : null, - "description" : "number of logfile slots to use", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint32" - }, - "wal.sync-interval" : { - "category" : "option", - "default" : 100, - "deprecatedIn" : null, - "description" : "interval for automatic, non-requested disk syncs (in milliseconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint64" - }, - "wal.throttle-wait" : { - "category" : "option", - "default" : 15000, - "deprecatedIn" : null, - "description" : "maximum wait time per operation when write-throttled (in milliseconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint64" - }, - "wal.throttle-when-pending" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "throttle writes when at least this many operations are waiting for collection (set to 0 to deactivate write-throttling)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "wal", - "type" : "uint64" - }, - "wal.use-mlock" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "mlock WAL logfiles in memory (may require elevated privileges or limits)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "wal", - "type" 
: "boolean" - }, - "working-directory" : { - "category" : "option", - "default" : "/var/tmp", - "deprecatedIn" : null, - "description" : "working directory in daemon mode", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - } -} diff --git a/Documentation/Examples/arangodump.json b/Documentation/Examples/arangodump.json deleted file mode 100644 index 7f382a5af118..000000000000 --- a/Documentation/Examples/arangodump.json +++ /dev/null @@ -1,850 +0,0 @@ -{ - "all-databases" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "dump data of all databases", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "batch-size" : { - "category" : "option", - "default" : 67108864, - "deprecatedIn" : null, - "description" : "maximum size for individual data batches (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "collection" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "restrict to collection name (can be specified multiple times)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "compress-output" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "compress files containing collection contents using gzip format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.6", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "dump-data" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "dump collection data", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "encryption.key-generator" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "program providing the encryption key on stdout. If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "encryption", - "type" : "string" - }, - "encryption.keyfile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file containing the encryption key. If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "encryption", - "type" : "string" - }, - "force" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "continue dumping even in the face of some server-side errors", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "ignore-distribute-shards-like-errors" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "continue dump even if sharding prototype collection is not backed up along", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "include-system-collections" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "include system collections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "initial-batch-size" : { - "category" : "option", - "default" : 8388608, - "deprecatedIn" : null, - "description" : "initial size for individual data batches (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - 
"type" : "string..." - }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "maskings" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file with maskings definition", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "output-directory" : { - "category" : "option", - "default" : "/home/steemann/ArangoNoAsan/dump", - "deprecatedIn" : null, - "description" : "output directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "overwrite" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "overwrite data in output directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "progress" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "show progress", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : 
false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "threads" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "maximum number of collections to process in parallel. 
From v3.4.0", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint32" - }, - "tick-end" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "last tick to be included in data dump", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "tick-start" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "only include data after this tick", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git a/Documentation/Examples/arangoexport.json b/Documentation/Examples/arangoexport.json deleted file mode 100644 index 9593dc606cbc..000000000000 --- a/Documentation/Examples/arangoexport.json +++ /dev/null @@ -1,745 +0,0 @@ -{ - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "collection" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "restrict to collection name (can be specified multiple times)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "fields" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "comma separated list of fileds to export into a csv file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "graph-name" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "name of a graph to export", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - 
"description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", \"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - 
"introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "output-directory" : { - "category" : "option", - "default" : "/home/steemann/ArangoNoAsan/export", - "deprecatedIn" : null, - "description" : "output directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "overwrite" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "overwrite data in output directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "progress" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "show progress", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "query" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "AQL query to run", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) 
for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "type" : { - "category" : "option", - "default" : "json", - "deprecatedIn" : null, - "description" : "type of export", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string", - "values" : "Possible values: \"csv\", \"json\", \"jsonl\", \"xgmml\", \"xml\"" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "xgmml-label-attribute" : { - "category" : "option", - "default" : "label", - "deprecatedIn" : null, - "description" : "specify document attribute that will be the xgmml label", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "xgmml-label-only" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "export only xgmml label", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git 
a/Documentation/Examples/arangoimport.json b/Documentation/Examples/arangoimport.json deleted file mode 100644 index f58676626235..000000000000 --- a/Documentation/Examples/arangoimport.json +++ /dev/null @@ -1,916 +0,0 @@ -{ - "backslash-escape" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "use backslash as the escape character for quotes, used for csv", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "batch-size" : { - "category" : "option", - "default" : 1048576, - "deprecatedIn" : null, - "description" : "size for individual data batches (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "collection" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "collection name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "convert" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "convert the strings 'null', 'false', 'true' and strings containing numbers into non-string types (csv and tsv only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "create-collection" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "create collection if it does not yet exist", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "create-collection-type" : { - "category" : "option", - "default" : "document", - "deprecatedIn" : null, - "description" : "type of collection if collection is created (edge or document)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string", - "values" : "Possible values: \"document\", \"edge\"" - }, - "create-database" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "create the 
target database if it does not exist", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file name (\"-\" for STDIN)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "from-collection-prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "_from collection name prefix (will be prepended to all values in '_from')", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "ignore-missing" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "ignore missing columns in csv input", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "latency" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show 10 second latency statistics (values in microseconds)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "on-duplicate" : { - "category" : "option", - "default" : "error", - "deprecatedIn" : null, - "description" : "action to perform when a unique key constraint violation occurs. Possible values: ignore, replace, update, error", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string", - "values" : "Possible values: \"error\", \"ignore\", \"replace\", \"update\"" - }, - "overwrite" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "overwrite collection if it exist (WARNING: this will remove any data from the collection)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "progress" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "show progress", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "quote" : { - "category" : "option", - "default" : "\"", - "deprecatedIn" : null, - "description" : "quote character(s), used for csv", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "remove-attribute" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "remove an attribute before inserting an attribute into a collection (for csv and tsv only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "separator" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "field separator, used for csv and tsv", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. 
If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "skip-lines" : { - "category" : "option", - "default" : 0, - "deprecatedIn" : null, - "description" : "number of lines to skip for formats (csv and tsv only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "threads" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "Number of parallel import threads. Most useful for the rocksdb engine", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint32" - }, - "to-collection-prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "_to collection name prefix (will be prepended to all values in '_to')", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "translate" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "translate an attribute name (use as --translate \"from=to\", for csv and tsv only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "type" : { - "category" : "option", - "default" : "json", - "deprecatedIn" : null, - "description" : "type of import file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string", - "values" : "Possible values: \"auto\", \"csv\", \"json\", \"jsonl\", \"tsv\"" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git a/Documentation/Examples/arangoinspect.json b/Documentation/Examples/arangoinspect.json deleted file mode 100644 index 5c339eca93e1..000000000000 --- a/Documentation/Examples/arangoinspect.json +++ /dev/null @@ -1,977 +0,0 @@ -{ - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "console.audit-file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "audit log file to save commands and results", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "console.auto-complete" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable auto completion", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.colors" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable color support", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.history" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "whether or not to load and persist command-line history", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.pager" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "enable paging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : 
false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.pager-command" : { - "category" : "option", - "default" : "less -X -R -F -L", - "deprecatedIn" : null, - "description" : "pager command", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "console.pretty-print" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable pretty printing", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.prompt" : { - "category" : "option", - "default" : "%E@%d> ", - "deprecatedIn" : null, - "description" : "prompt used in REPL. prompt components are: '%t': current time as timestamp, '%p': duration of last command in seconds, '%d': name of current database, '%e': current endpoint, '%E': current endpoint without protocol, '%u': current user", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "default-language" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ISO-639 language code", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "javascript.check-syntax" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "syntax check code Javascript code from file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." 
- }, - "javascript.client-module" : { - "category" : "option", - "default" : "inspector.js", - "deprecatedIn" : null, - "description" : "client module to use at startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.copy-directory" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "target directory to copy files from 'javascript.startup-directory' into(only used when `--javascript.copy-installation` is enabled)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.copy-installation" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "copy contents of 'javascript.startup-directory'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.current-module-directory" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "add current directory to module path", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.execute" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "execute Javascript code from file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.execute-string" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "execute Javascript code from string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.gc-interval" : { - "category" : "option", - "default" : 50, - "deprecatedIn" : null, - "description" : "request-based garbage collection interval (each n.th command)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.module-directory" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "additional paths containing JavaScript modules", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." 
- }, - "javascript.startup-directory" : { - "category" : "option", - "default" : "./js", - "deprecatedIn" : null, - "description" : "startup paths containing the Javascript files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.unit-test-filter" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "filter testcases in suite", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.unit-tests" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "do not start as shell, run unit tests instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.v8-max-heap" : { - "category" : "option", - "default" : 3072, - "deprecatedIn" : null, - "description" : "maximal heap size (in MB)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-options" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "options to pass to v8", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "jslint" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "do not start as shell, run jslint instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "quiet" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "silent startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "server.ask-jwt-secret" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "if this option is specified, the user will be prompted for a JWT secret. This option is not compatible with --server.username or --server.password. 
If specified, it will be used for all connections - even when a new connection to another server is created", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. 
If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git a/Documentation/Examples/arangorestore.json b/Documentation/Examples/arangorestore.json deleted file mode 100644 index d24e63da8315..000000000000 --- a/Documentation/Examples/arangorestore.json +++ /dev/null @@ -1,934 +0,0 @@ -{ - "all-databases" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "restore data to all databases", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "batch-size" : { - "category" : "option", - "default" : 8388608, - "deprecatedIn" : null, - "description" : "maximum size for individual data batches (in bytes)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "cleanup-duplicate-attributes" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "clean up duplicate attributes (use first specified value) in input documents instead of 
making the restore operation fail", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "collection" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "restrict to collection name (can be specified multiple times)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "create-collection" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "create collection structure", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "create-database" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "create the target database if it does not exist", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "default-number-of-shards" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "description" : "default value for numberOfShards if not specified in dump", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "default-replication-factor" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "description" : "default value for replicationFactor if not specified in dump", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint64" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "encryption.key-generator" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "program providing the encryption key on stdout. If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "encryption", - "type" : "string" - }, - "encryption.keyfile" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "file containing the encryption key. If set, encryption will be enabled.", - "dynamic" : false, - "enterpriseOnly" : true, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "encryption", - "type" : "string" - }, - "force" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "continue restore even in the face of some server-side errors", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "force-same-database" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "force usage of the same database name as in the source dump.json file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "ignore-distribute-shards-like-errors" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "continue restore even if sharding prototype collection is missing", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "import-data" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "import data into collection", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "include-system-collections" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "include system collections", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "input-directory" : { - "category" : "option", - "default" : "/home/steemann/ArangoNoAsan/dump", - "deprecatedIn" : null, - "description" : "input directory", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - 
"section" : "", - "type" : "string" - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - 
"category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to 
use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", \"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "number-of-shards" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "override value for numberOfShards (can be specified multiple times, e.g. --numberOfShards 2 --numberOfShards myCollection=3)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "overwrite" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "overwrite collections if they exist", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "progress" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "show progress", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "replication-factor" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "override value for replicationFactor (can be specified multiple times, e.g. --replicationFactor 2 --replicationFactor myCollection=3)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.3.22", - "v3.4.2" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. 
If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "threads" : { - "category" : "option", - "default" : 2, - "deprecatedIn" : null, - "description" : "maximum number of collections to process in parallel", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "uint32" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "view" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "restrict to view name (can be specified multiple times)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- } -} diff --git a/Documentation/Examples/arangosh.json b/Documentation/Examples/arangosh.json deleted file mode 100644 index 3f43429181ba..000000000000 --- a/Documentation/Examples/arangosh.json +++ /dev/null @@ -1,978 +0,0 @@ -{ - "check-configuration" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "check the configuration and exit", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "config" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "configuration" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "the configuration file or 'none'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "console.audit-file" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "audit log file to save commands and results", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "console.auto-complete" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable auto completion", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.colors" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable color support", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.history" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "whether or not to load and persist command-line history", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.pager" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "enable paging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.pager-command" : { - "category" : "option", - "default" : "less -X -R -F -L", - "deprecatedIn" : null, - "description" : "pager command", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "console.pretty-print" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "enable pretty printing", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - 
"requiresValue" : false, - "section" : "console", - "type" : "boolean" - }, - "console.prompt" : { - "category" : "option", - "default" : "%E@%d> ", - "deprecatedIn" : null, - "description" : "prompt used in REPL. prompt components are: '%t': current time as timestamp, '%p': duration of last command in seconds, '%d': name of current database, '%e': current endpoint, '%E': current endpoint without protocol, '%u': current user", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "console", - "type" : "string" - }, - "default-language" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "ISO-639 language code", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string" - }, - "define" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "define key=value for a @key@ entry in config file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "dump-dependencies" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "dump dependency graph", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "dump-options" : { - "category" : "command", - "default" : true, - "deprecatedIn" : null, - "description" : "dump configuration options in JSON format", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "javascript.check-syntax" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "syntax check code Javascript code from file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." 
- }, - "javascript.client-module" : { - "category" : "option", - "default" : "client.js", - "deprecatedIn" : null, - "description" : "client module to use at startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.copy-directory" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "target directory to copy files from 'javascript.startup-directory' into(only used when `--javascript.copy-installation` is enabled)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.copy-installation" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "copy contents of 'javascript.startup-directory'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.current-module-directory" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "add current directory to module path", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "javascript", - "type" : "boolean" - }, - "javascript.execute" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "execute Javascript code from file", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.execute-string" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "execute Javascript code from string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.gc-interval" : { - "category" : "option", - "default" : 50, - "deprecatedIn" : null, - "description" : "request-based garbage collection interval (each n.th command)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.module-directory" : { - "category" : "option", - "default" : [ - "./enterprise/js" - ], - "deprecatedIn" : null, - "description" : "additional paths containing JavaScript modules", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." 
- }, - "javascript.startup-directory" : { - "category" : "option", - "default" : "./js", - "deprecatedIn" : null, - "description" : "startup paths containing the Javascript files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.unit-test-filter" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "filter testcases in suite", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string" - }, - "javascript.unit-tests" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "do not start as shell, run unit tests instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "javascript.v8-max-heap" : { - "category" : "option", - "default" : 3072, - "deprecatedIn" : null, - "description" : "maximal heap size (in MB)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "uint64" - }, - "javascript.v8-options" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "options to pass to v8", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "javascript", - "type" : "string..." - }, - "jslint" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "do not start as shell, run jslint instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." - }, - "log" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "", - "type" : "string..." 
- }, - "log.color" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "use colors for TTY logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.escape" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "escape characters when logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.file" : { - "category" : "option", - "default" : "-", - "deprecatedIn" : null, - "description" : "shortcut for '--log.output file://'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-group" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "group to use for new log file, user must be a member of this group", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.file-mode" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "mode to use for new log file, umask will be applied as well", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.4.5", - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.force-direct" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "do not start a seperate thread for logging", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.foreground-tty" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "also log to tty if backgrounded", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.ids" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "log unique message ids", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.keep-logrotate" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "keep the old log file after receiving a sighup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.level" : { - "category" : "option", - "default" : [ - "info" - ], - "deprecatedIn" : null, - "description" : "the global or topic-specific log level", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." 
- }, - "log.line-number" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "append line number and file name", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.output" : { - "category" : "option", - "default" : [ - ], - "deprecatedIn" : null, - "description" : "log destination(s)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string..." - }, - "log.performance" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "shortcut for '--log.level performance=trace'", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.prefix" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "prefix log message with this string", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string" - }, - "log.request-parameters" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "include full URLs and HTTP request parameters in trace logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.role" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "log server role", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.shorten-filenames" : { - "category" : "option", - "default" : true, - "deprecatedIn" : null, - "description" : "shorten filenames in log output (use with --log.line-number)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread identifier in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.thread-name" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "show thread name in log message", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.time-format" : { - "category" : "option", - "default" : "utc-datestring", - "deprecatedIn" : null, - "description" : "time format to use in logs", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : [ - "v3.5.0" - ], - "obsolete" : false, - "requiresValue" : true, - "section" : "log", - "type" : "string", - "values" : "Possible values: \"local-datestring\", \"timestamp\", \"timestamp-micros\", \"timestamp-millis\", 
\"uptime\", \"uptime-micros\", \"uptime-millis\", \"utc-datestring\"" - }, - "log.use-local-time" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use local timezone instead of UTC", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "log.use-microtime" : { - "category" : "option", - "default" : false, - "deprecatedIn" : [ - "v3.5.0" - ], - "description" : "use microtime instead", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "log", - "type" : "boolean" - }, - "quiet" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "silent startup", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - }, - "random.generator" : { - "category" : "option", - "default" : 1, - "deprecatedIn" : null, - "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "random", - "type" : "uint32", - "values" : "Possible values: 1, 2, 3, 4" - }, - "server.ask-jwt-secret" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "if this option is specified, the user will be prompted for a JWT secret. This option is not compatible with --server.username or --server.password. If specified, it will be used for all connections - even when a new connection to another server is created", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.authentication" : { - "category" : "option", - "default" : false, - "deprecatedIn" : null, - "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "server", - "type" : "boolean" - }, - "server.connection-timeout" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "connection timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.database" : { - "category" : "option", - "default" : "_system", - "deprecatedIn" : null, - "description" : "database name to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.endpoint" : { - "category" : "option", - "default" : "http+tcp://127.0.0.1:8529", - "deprecatedIn" : null, - "description" : "endpoint to connect to, use 'none' to start without a server. 
Use http+ssl:// or vst+ssl:// as schema to connect to an SSL-secured server endpoint, otherwise http+tcp://, vst+tcp:// or unix://", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.max-packet-size" : { - "category" : "option", - "default" : 1073741824, - "deprecatedIn" : null, - "description" : "maximum packet size (in bytes) for client/server communication", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : true, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "uint64" - }, - "server.password" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "password to use when connecting. If not specified and authentication is required, the user will be prompted for a password", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "server.request-timeout" : { - "category" : "option", - "default" : 1200, - "deprecatedIn" : null, - "description" : "request timeout in seconds", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "double" - }, - "server.username" : { - "category" : "option", - "default" : "root", - "deprecatedIn" : null, - "description" : "username to use when connecting", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "server", - "type" : "string" - }, - "ssl.protocol" : { - "category" : "option", - "default" : 5, - "deprecatedIn" : null, - "description" : "ssl protocol (1 = SSLv2 (unsupported), 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSv1.2)", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "ssl", - "type" : "uint64", - "values" : "Possible values: 1, 2, 3, 4, 5" - }, - "temp.path" : { - "category" : "option", - "default" : "", - "deprecatedIn" : null, - "description" : "path for temporary files", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : true, - "section" : "temp", - "type" : "string" - }, - "version" : { - "category" : "command", - "default" : false, - "deprecatedIn" : null, - "description" : "reports the version and exits", - "dynamic" : false, - "enterpriseOnly" : false, - "hidden" : false, - "introducedIn" : null, - "obsolete" : false, - "requiresValue" : false, - "section" : "", - "type" : "boolean" - } -} diff --git a/Documentation/Examples/col_dropIndex.generated b/Documentation/Examples/col_dropIndex.generated deleted file mode 100644 index 873df4af0fa5..000000000000 --- a/Documentation/Examples/col_dropIndex.generated +++ /dev/null @@ -1,56 +0,0 @@ -arangosh> db.example.ensureSkiplist("a", "b"); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/109681", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> var indexInfo = db.example.getIndexes(); -arangosh> indexInfo; -[ - { - "fields" : [ - "_key" - ], - "id" : "example/0", - 
"selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/109681", - "sparse" : false, - "type" : "skiplist", - "unique" : false - } -] -arangosh> db.example.dropIndex(indexInfo[0]) -false -arangosh> db.example.dropIndex(indexInfo[1].id) -true -arangosh> indexInfo = db.example.getIndexes(); -[ - { - "fields" : [ - "_key" - ], - "id" : "example/0", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - } -] diff --git a/Documentation/Examples/collectionCount.generated b/Documentation/Examples/collectionCount.generated deleted file mode 100644 index 9b7fc2c80866..000000000000 --- a/Documentation/Examples/collectionCount.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.users.count(); -0 diff --git a/Documentation/Examples/collectionDatabaseCollectionName.generated b/Documentation/Examples/collectionDatabaseCollectionName.generated deleted file mode 100644 index 473c6637aea0..000000000000 --- a/Documentation/Examples/collectionDatabaseCollectionName.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.example; -[ArangoCollection 109705, "example" (type document, status loaded)] diff --git a/Documentation/Examples/collectionDatabaseCreate.generated b/Documentation/Examples/collectionDatabaseCreate.generated deleted file mode 100644 index 8a9022496250..000000000000 --- a/Documentation/Examples/collectionDatabaseCreate.generated +++ /dev/null @@ -1,16 +0,0 @@ -arangosh> c = db._create("users"); -[ArangoCollection 109716, "users" (type document, status loaded)] -arangosh> c.properties(); -{ - "doCompact" : true, - "journalSize" : 33554432, - "isSystem" : false, - "isVolatile" : false, - "waitForSync" : false, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/collectionDatabaseCreateKey.generated b/Documentation/Examples/collectionDatabaseCreateKey.generated deleted file mode 100644 index 46cf8a451840..000000000000 --- a/Documentation/Examples/collectionDatabaseCreateKey.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> db._create("users", -........> { keyOptions: { type: "autoincrement", offset: 10, increment: 5 } }); -[ArangoCollection 109728, "users" (type document, status loaded)] -arangosh> db.users.save({ name: "user 1" }); -{ - "_id" : "users/10", - "_key" : "10", - "_rev" : "_YOn1dK---_" -} -arangosh> db.users.save({ name: "user 2" }); -{ - "_id" : "users/15", - "_key" : "15", - "_rev" : "_YOn1dK---B" -} -arangosh> db.users.save({ name: "user 3" }); -{ - "_id" : "users/20", - "_key" : "20", - "_rev" : "_YOn1dK---D" -} diff --git a/Documentation/Examples/collectionDatabaseCreateProperties.generated b/Documentation/Examples/collectionDatabaseCreateProperties.generated deleted file mode 100644 index 49e06ebfde96..000000000000 --- a/Documentation/Examples/collectionDatabaseCreateProperties.generated +++ /dev/null @@ -1,17 +0,0 @@ -arangosh> c = db._create("users", { waitForSync : true, -........> journalSize : 1024 * 1204}); -[ArangoCollection 109746, "users" (type document, status loaded)] -arangosh> c.properties(); -{ - "doCompact" : true, - "journalSize" : 1232896, - "isSystem" : false, - "isVolatile" : false, - "waitForSync" : true, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/collectionDatabaseCreateSpecialKey.generated 
b/Documentation/Examples/collectionDatabaseCreateSpecialKey.generated deleted file mode 100644 index ee592ecf0aad..000000000000 --- a/Documentation/Examples/collectionDatabaseCreateSpecialKey.generated +++ /dev/null @@ -1,16 +0,0 @@ -arangosh> db._create("users", { keyOptions: { allowUserKeys: false } }); -[ArangoCollection 109758, "users" (type document, status loaded)] -arangosh> db.users.save({ name: "user 1" }); -{ - "_id" : "users/109765", - "_key" : "109765", - "_rev" : "_YOn1dMC--_" -} -arangosh> db.users.save({ name: "user 2", _key: "myuser" }); -[ArangoError 1222: unexpected document key] -arangosh> db.users.save({ name: "user 3" }); -{ - "_id" : "users/109770", - "_key" : "109770", - "_rev" : "_YOn1dMC--C" -} diff --git a/Documentation/Examples/collectionDatabaseCreateSuccess.generated b/Documentation/Examples/collectionDatabaseCreateSuccess.generated deleted file mode 100644 index 74ddc8f998d0..000000000000 --- a/Documentation/Examples/collectionDatabaseCreateSuccess.generated +++ /dev/null @@ -1,16 +0,0 @@ -arangosh> c = db._create("users"); -[ArangoCollection 109777, "users" (type document, status loaded)] -arangosh> c.properties(); -{ - "doCompact" : true, - "journalSize" : 33554432, - "isSystem" : false, - "isVolatile" : false, - "waitForSync" : false, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/collectionDatabaseDrop.generated b/Documentation/Examples/collectionDatabaseDrop.generated deleted file mode 100644 index 7fc3398f2761..000000000000 --- a/Documentation/Examples/collectionDatabaseDrop.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109789, "example" (type document, status loaded)] -arangosh> db._drop(col); -arangosh> col; -[ArangoCollection 109789, "example" (type document, status loaded)] diff --git a/Documentation/Examples/collectionDatabaseDropByObject.generated b/Documentation/Examples/collectionDatabaseDropByObject.generated deleted file mode 100644 index 93d1435e849a..000000000000 --- a/Documentation/Examples/collectionDatabaseDropByObject.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109800, "example" (type document, status loaded)] -arangosh> db._drop(col); -arangosh> col; -[ArangoCollection 109800, "example" (type document, status loaded)] diff --git a/Documentation/Examples/collectionDatabaseDropName.generated b/Documentation/Examples/collectionDatabaseDropName.generated deleted file mode 100644 index ba8fae1c6eaa..000000000000 --- a/Documentation/Examples/collectionDatabaseDropName.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109811, "example" (type document, status loaded)] -arangosh> db._drop("example"); -arangosh> col; -[ArangoCollection 109811, "example" (type document, status deleted)] diff --git a/Documentation/Examples/collectionDatabaseDropSystem.generated b/Documentation/Examples/collectionDatabaseDropSystem.generated deleted file mode 100644 index b9154dea616d..000000000000 --- a/Documentation/Examples/collectionDatabaseDropSystem.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db._example; -[ArangoCollection 109822, "_example" (type document, status loaded)] -arangosh> db._drop("_example", { isSystem: true }); -arangosh> col; -[ArangoCollection 109822, "_example" (type document, status deleted)] diff --git a/Documentation/Examples/collectionDatabaseName.generated 
b/Documentation/Examples/collectionDatabaseName.generated deleted file mode 100644 index ed73c8fbedb8..000000000000 --- a/Documentation/Examples/collectionDatabaseName.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._collection("demo"); -[ArangoCollection 87, "demo" (type document, status loaded)] diff --git a/Documentation/Examples/collectionDatabaseNameKnown.generated b/Documentation/Examples/collectionDatabaseNameKnown.generated deleted file mode 100644 index ed73c8fbedb8..000000000000 --- a/Documentation/Examples/collectionDatabaseNameKnown.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._collection("demo"); -[ArangoCollection 87, "demo" (type document, status loaded)] diff --git a/Documentation/Examples/collectionDatabaseNameUnknown.generated b/Documentation/Examples/collectionDatabaseNameUnknown.generated deleted file mode 100644 index feb422d86ea1..000000000000 --- a/Documentation/Examples/collectionDatabaseNameUnknown.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._collection("unknown"); -null diff --git a/Documentation/Examples/collectionDatabaseTruncate.generated b/Documentation/Examples/collectionDatabaseTruncate.generated deleted file mode 100644 index 3dfda56c2e71..000000000000 --- a/Documentation/Examples/collectionDatabaseTruncate.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109830, "example" (type document, status loaded)] -arangosh> col.save({ "Hello" : "World" }); -{ - "_id" : "example/109837", - "_key" : "109837", - "_rev" : "_YOn1dSS--B" -} -arangosh> col.count(); -1 -arangosh> db._truncate(col); -arangosh> col.count(); -0 diff --git a/Documentation/Examples/collectionDatabaseTruncateByObject.generated b/Documentation/Examples/collectionDatabaseTruncateByObject.generated deleted file mode 100644 index d56ce7f3d4b4..000000000000 --- a/Documentation/Examples/collectionDatabaseTruncateByObject.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109853, "example" (type document, status loaded)] -arangosh> col.save({ "Hello" : "World" }); -{ - "_id" : "example/109860", - "_key" : "109860", - "_rev" : "_YOn1dTW--_" -} -arangosh> col.count(); -1 -arangosh> db._truncate(col); -arangosh> col.count(); -0 diff --git a/Documentation/Examples/collectionDatabaseTruncateName.generated b/Documentation/Examples/collectionDatabaseTruncateName.generated deleted file mode 100644 index a033d8e9574d..000000000000 --- a/Documentation/Examples/collectionDatabaseTruncateName.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109877, "example" (type document, status loaded)] -arangosh> col.save({ "Hello" : "World" }); -{ - "_id" : "example/109884", - "_key" : "109884", - "_rev" : "_YOn1dUa--_" -} -arangosh> col.count(); -1 -arangosh> db._truncate("example"); -arangosh> col.count(); -0 diff --git a/Documentation/Examples/collectionDrop.generated b/Documentation/Examples/collectionDrop.generated deleted file mode 100644 index 70fc107f0515..000000000000 --- a/Documentation/Examples/collectionDrop.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 109900, "example" (type document, status loaded)] -arangosh> col.drop(); -arangosh> col; -[ArangoCollection 109900, "example" (type document, status deleted)] diff --git a/Documentation/Examples/collectionDropSystem.generated b/Documentation/Examples/collectionDropSystem.generated deleted file mode 100644 index 8a6ab7f1cf36..000000000000 --- 
a/Documentation/Examples/collectionDropSystem.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db._example; -[ArangoCollection 109911, "_example" (type document, status loaded)] -arangosh> col.drop({ isSystem: true }); -arangosh> col; -[ArangoCollection 109911, "_example" (type document, status deleted)] diff --git a/Documentation/Examples/collectionEnsureIndex.generated b/Documentation/Examples/collectionEnsureIndex.generated deleted file mode 100644 index 120c0aa55ed6..000000000000 --- a/Documentation/Examples/collectionEnsureIndex.generated +++ /dev/null @@ -1,29 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "hash", fields: [ "a" ], sparse: true }); -{ - "deduplicate" : true, - "fields" : [ - "a" - ], - "id" : "test/109923", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : true, - "type" : "hash", - "unique" : false, - "code" : 201 -} -arangosh> db.test.ensureIndex({ type: "hash", fields: [ "a", "b" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "test/109926", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : true, - "code" : 201 -} diff --git a/Documentation/Examples/collectionFigures_mmfiles.generated b/Documentation/Examples/collectionFigures_mmfiles.generated deleted file mode 100644 index 213cc2d81898..000000000000 --- a/Documentation/Examples/collectionFigures_mmfiles.generated +++ /dev/null @@ -1,44 +0,0 @@ -arangosh> db.demo.figures() -{ - "indexes" : { - "count" : 1, - "size" : 32128 - }, - "documentReferences" : 0, - "waitingFor" : "-", - "alive" : { - "count" : 1, - "size" : 184 - }, - "dead" : { - "count" : 0, - "size" : 0, - "deletion" : 0 - }, - "compactionStatus" : { - "message" : "skipped compaction because collection has no datafiles", - "time" : "2019-02-20T10:33:14Z", - "count" : 0, - "filesCombined" : 0, - "bytesRead" : 0, - "bytesWritten" : 0 - }, - "datafiles" : { - "count" : 0, - "fileSize" : 0 - }, - "journals" : { - "count" : 1, - "fileSize" : 33554432 - }, - "compactors" : { - "count" : 0, - "fileSize" : 0 - }, - "revisions" : { - "count" : 1, - "size" : 48192 - }, - "lastTick" : 95, - "uncollectedLogfileEntries" : 0 -} diff --git a/Documentation/Examples/collectionFirstExample.generated b/Documentation/Examples/collectionFirstExample.generated deleted file mode 100644 index 25ad34df5c63..000000000000 --- a/Documentation/Examples/collectionFirstExample.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> db.users.firstExample("name", "Angela"); -{ - "_key" : "109957", - "_id" : "users/109957", - "_rev" : "_YOn1e3u--B", - "name" : "Angela" -} diff --git a/Documentation/Examples/collectionFulltext.generated b/Documentation/Examples/collectionFulltext.generated deleted file mode 100644 index 47ead3f9b59b..000000000000 --- a/Documentation/Examples/collectionFulltext.generated +++ /dev/null @@ -1,48 +0,0 @@ -arangosh> db.emails.ensureFulltextIndex("content"); -{ - "fields" : [ - "content" - ], - "id" : "emails/109972", - "isNewlyCreated" : true, - "minLength" : 2, - "sparse" : true, - "type" : "fulltext", - "unique" : false, - "code" : 201 -} -arangosh> db.emails.save({ content: -........> "Hello Alice, how are you doing? 
Regards, Bob"}); -{ - "_id" : "emails/109975", - "_key" : "109975", - "_rev" : "_YOn1e5q--_" -} -arangosh> db.emails.save({ content: -........> "Hello Charlie, do Alice and Bob know about it?"}); -{ - "_id" : "emails/109979", - "_key" : "109979", - "_rev" : "_YOn1e5u--_" -} -arangosh> db.emails.save({ content: "I think they don't know. Regards, Eve" }); -{ - "_id" : "emails/109982", - "_key" : "109982", - "_rev" : "_YOn1e5u--B" -} -arangosh> db.emails.fulltext("content", "charlie,|eve").toArray(); -[ - { - "_key" : "109979", - "_id" : "emails/109979", - "_rev" : "_YOn1e5u--_", - "content" : "Hello Charlie, do Alice and Bob know about it?" - }, - { - "_key" : "109982", - "_id" : "emails/109982", - "_rev" : "_YOn1e5u--B", - "content" : "I think they don't know. Regards, Eve" - } -] diff --git a/Documentation/Examples/collectionGetIndexes.generated b/Documentation/Examples/collectionGetIndexes.generated deleted file mode 100644 index 3b0d4141dc01..000000000000 --- a/Documentation/Examples/collectionGetIndexes.generated +++ /dev/null @@ -1,61 +0,0 @@ -arangosh> db.test.ensureHashIndex("hashListAttribute", -........> "hashListSecondAttribute.subAttribute"); -{ - "deduplicate" : true, - "fields" : [ - "hashListAttribute", - "hashListSecondAttribute.subAttribute" - ], - "id" : "test/110005", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false, - "code" : 201 -} -arangosh> db.test.getIndexes(); -[ - { - "fields" : [ - "_key" - ], - "id" : "test/0", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "skiplistAttribute" - ], - "id" : "test/109999", - "sparse" : false, - "type" : "skiplist", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "skiplistUniqueAttribute" - ], - "id" : "test/110002", - "sparse" : false, - "type" : "skiplist", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "hashListAttribute", - "hashListSecondAttribute.subAttribute" - ], - "id" : "test/110005", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false - } -] diff --git a/Documentation/Examples/collectionLoad.generated b/Documentation/Examples/collectionLoad.generated deleted file mode 100644 index 69213fdaac2c..000000000000 --- a/Documentation/Examples/collectionLoad.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 110013, "example" (type document, status loaded)] -arangosh> col.load(); -arangosh> col; -[ArangoCollection 110013, "example" (type document, status loaded)] diff --git a/Documentation/Examples/collectionLookupByKeys.generated b/Documentation/Examples/collectionLookupByKeys.generated deleted file mode 100644 index c9bdeb72046f..000000000000 --- a/Documentation/Examples/collectionLookupByKeys.generated +++ /dev/null @@ -1,71 +0,0 @@ -arangosh> keys = [ ]; -[ ] -arangosh> for (var i = 0; i < 10; ++i) { -........> db.example.insert({ _key: "test" + i, value: i }); -........> keys.push("test" + i); -........> } -arangosh> db.example.documents(keys); -{ - "documents" : [ - { - "_key" : "test0", - "_id" : "example/test0", - "_rev" : "_YOn1f_6--B", - "value" : 0 - }, - { - "_key" : "test1", - "_id" : "example/test1", - "_rev" : "_YOn1f_6--D", - "value" : 1 - }, - { - "_key" : "test2", - "_id" : "example/test2", - "_rev" : "_YOn1fA---_", - "value" : 2 - }, - { - "_key" : "test3", - "_id" : "example/test3", - "_rev" : "_YOn1fA---B", - "value" : 3 - }, - { - "_key" : 
"test4", - "_id" : "example/test4", - "_rev" : "_YOn1fA---D", - "value" : 4 - }, - { - "_key" : "test5", - "_id" : "example/test5", - "_rev" : "_YOn1fA---F", - "value" : 5 - }, - { - "_key" : "test6", - "_id" : "example/test6", - "_rev" : "_YOn1fA---H", - "value" : 6 - }, - { - "_key" : "test7", - "_id" : "example/test7", - "_rev" : "_YOn1fA---J", - "value" : 7 - }, - { - "_key" : "test8", - "_id" : "example/test8", - "_rev" : "_YOn1fAC--_", - "value" : 8 - }, - { - "_key" : "test9", - "_id" : "example/test9", - "_rev" : "_YOn1fAC--B", - "value" : 9 - } - ] -} diff --git a/Documentation/Examples/collectionProperties.generated b/Documentation/Examples/collectionProperties.generated deleted file mode 100644 index 006764c8db52..000000000000 --- a/Documentation/Examples/collectionProperties.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> db.example.properties(); -{ - "doCompact" : true, - "journalSize" : 33554432, - "isSystem" : false, - "isVolatile" : false, - "waitForSync" : false, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/collectionProperty.generated b/Documentation/Examples/collectionProperty.generated deleted file mode 100644 index 26145368941f..000000000000 --- a/Documentation/Examples/collectionProperty.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> db.example.properties({ waitForSync : true }); -{ - "doCompact" : true, - "journalSize" : 33554432, - "isSystem" : false, - "isVolatile" : false, - "waitForSync" : true, - "keyOptions" : { - "allowUserKeys" : true, - "type" : "traditional", - "lastValue" : 0 - }, - "indexBuckets" : 8 -} diff --git a/Documentation/Examples/collectionRemoveByKeys.generated b/Documentation/Examples/collectionRemoveByKeys.generated deleted file mode 100644 index dfad3a285e4a..000000000000 --- a/Documentation/Examples/collectionRemoveByKeys.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> keys = [ ]; -[ ] -arangosh> for (var i = 0; i < 10; ++i) { -........> db.example.insert({ _key: "test" + i, value: i }); -........> keys.push("test" + i); -........> } -arangosh> db.example.removeByKeys(keys); -{ - "removed" : 10, - "ignored" : 0 -} diff --git a/Documentation/Examples/collectionRename.generated b/Documentation/Examples/collectionRename.generated deleted file mode 100644 index bebdf76cd5c6..000000000000 --- a/Documentation/Examples/collectionRename.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> c = db.example; -[ArangoCollection 110131, "example" (type document, status loaded)] -arangosh> c.rename("better-example"); -arangosh> c; -[ArangoCollection 110131, "better-example" (type document, status loaded)] diff --git a/Documentation/Examples/collectionTruncate.generated b/Documentation/Examples/collectionTruncate.generated deleted file mode 100644 index a24d773f2056..000000000000 --- a/Documentation/Examples/collectionTruncate.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> col = db.example; -[ArangoCollection 110141, "example" (type document, status loaded)] -arangosh> col.save({ "Hello" : "World" }); -{ - "_id" : "example/110148", - "_key" : "110148", - "_rev" : "_YOn1fGK--_" -} -arangosh> col.count(); -1 -arangosh> col.truncate(); -arangosh> col.count(); -0 diff --git a/Documentation/Examples/collectionsDatabaseName.generated b/Documentation/Examples/collectionsDatabaseName.generated deleted file mode 100644 index a325da73856c..000000000000 --- a/Documentation/Examples/collectionsDatabaseName.generated +++ /dev/null @@ -1,18 +0,0 @@ 
-arangosh> db._collections(); -[ - [ArangoCollection 32, "_appbundles" (type document, status loaded)], - [ArangoCollection 27, "_apps" (type document, status loaded)], - [ArangoCollection 13, "_aqlfunctions" (type document, status loaded)], - [ArangoCollection 15, "_frontend" (type document, status loaded)], - [ArangoCollection 6, "_graphs" (type document, status loaded)], - [ArangoCollection 2, "_iresearch_analyzers" (type document, status loaded)], - [ArangoCollection 19, "_jobs" (type document, status loaded)], - [ArangoCollection 17, "_queues" (type document, status loaded)], - [ArangoCollection 66, "_statistics" (type document, status loaded)], - [ArangoCollection 71, "_statistics15" (type document, status loaded)], - [ArangoCollection 61, "_statisticsRaw" (type document, status loaded)], - [ArangoCollection 8, "_users" (type document, status loaded)], - [ArangoCollection 96, "animals" (type document, status loaded)], - [ArangoCollection 87, "demo" (type document, status loaded)], - [ArangoCollection 110164, "example" (type document, status loaded)] -] diff --git a/Documentation/Examples/cursorCount.generated b/Documentation/Examples/cursorCount.generated deleted file mode 100644 index 831f6ca7d5a2..000000000000 --- a/Documentation/Examples/cursorCount.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.five.all().limit(2).count(); -null diff --git a/Documentation/Examples/cursorCountLimit.generated b/Documentation/Examples/cursorCountLimit.generated deleted file mode 100644 index 825b8f222b5f..000000000000 --- a/Documentation/Examples/cursorCountLimit.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.five.all().limit(2).count(true); -null diff --git a/Documentation/Examples/cursorCountUnLimited.generated b/Documentation/Examples/cursorCountUnLimited.generated deleted file mode 100644 index 831f6ca7d5a2..000000000000 --- a/Documentation/Examples/cursorCountUnLimited.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.five.all().limit(2).count(); -null diff --git a/Documentation/Examples/cursorHasNext.generated b/Documentation/Examples/cursorHasNext.generated deleted file mode 100644 index b76a428f33ad..000000000000 --- a/Documentation/Examples/cursorHasNext.generated +++ /dev/null @@ -1,32 +0,0 @@ -arangosh> var a = db._query("FOR x IN five RETURN x"); -arangosh> while (a.hasNext()) print(a.next()); -{ - "_key" : "110269", - "_id" : "five/110269", - "_rev" : "_YOn1fLW--_", - "name" : "one" -} -{ - "_key" : "110273", - "_id" : "five/110273", - "_rev" : "_YOn1fLW--B", - "name" : "two" -} -{ - "_key" : "110282", - "_id" : "five/110282", - "_rev" : "_YOn1fLa---", - "name" : "five" -} -{ - "_key" : "110276", - "_id" : "five/110276", - "_rev" : "_YOn1fLW--D", - "name" : "three" -} -{ - "_key" : "110279", - "_id" : "five/110279", - "_rev" : "_YOn1fLW--F", - "name" : "four" -} diff --git a/Documentation/Examples/cursorNext.generated b/Documentation/Examples/cursorNext.generated deleted file mode 100644 index d50ae89f4486..000000000000 --- a/Documentation/Examples/cursorNext.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> db._query("FOR x IN five RETURN x").next(); -{ - "_key" : "110297", - "_id" : "five/110297", - "_rev" : "_YOn1fMW--B", - "name" : "one" -} diff --git a/Documentation/Examples/dateIso8601.generated b/Documentation/Examples/dateIso8601.generated deleted file mode 100644 index e1fbc75b95b4..000000000000 --- a/Documentation/Examples/dateIso8601.generated +++ /dev/null @@ -1,7 +0,0 @@ -@Q: -RETURN DATE_ISO8601('2017-02-03T18:25:43Z') - -@R -[ - 
"2017-02-03T18:25:43.000Z" -] \ No newline at end of file diff --git a/Documentation/Examples/dbId.generated b/Documentation/Examples/dbId.generated deleted file mode 100644 index 302f17f9276e..000000000000 --- a/Documentation/Examples/dbId.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb").db._id(); -1 diff --git a/Documentation/Examples/dbName.generated b/Documentation/Examples/dbName.generated deleted file mode 100644 index 8a804e94d384..000000000000 --- a/Documentation/Examples/dbName.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb").db._name(); -_system diff --git a/Documentation/Examples/dbPath.generated b/Documentation/Examples/dbPath.generated deleted file mode 100644 index 21b1d8d744b8..000000000000 --- a/Documentation/Examples/dbPath.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb").db._path(); -/tmp/arangosh_uprJb4/tmp-27793-56941049/data/databases/database-1 diff --git a/Documentation/Examples/dbVersion.generated b/Documentation/Examples/dbVersion.generated deleted file mode 100644 index 4884781ea6ad..000000000000 --- a/Documentation/Examples/dbVersion.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> require("@arangodb").db._version(); -3.5.0-devel diff --git a/Documentation/Examples/documentDocumentRemove.generated b/Documentation/Examples/documentDocumentRemove.generated deleted file mode 100644 index 1359af8f7458..000000000000 --- a/Documentation/Examples/documentDocumentRemove.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110329", - "_key" : "110329", - "_rev" : "_YOn1fNW--B" -} -arangosh> db.example.document(a1); -{ - "_key" : "110329", - "_id" : "example/110329", - "_rev" : "_YOn1fNW--B", - "a" : 1 -} -arangosh> db.example.remove(a1); -{ - "_id" : "example/110329", - "_key" : "110329", - "_rev" : "_YOn1fNW--B" -} -arangosh> db.example.document(a1); -[ArangoError 1202: document not found] diff --git a/Documentation/Examples/documentDocumentRemoveConflict.generated b/Documentation/Examples/documentDocumentRemoveConflict.generated deleted file mode 100644 index d5171d2aa2e7..000000000000 --- a/Documentation/Examples/documentDocumentRemoveConflict.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110348", - "_key" : "110348", - "_rev" : "_YOn1fOa--B" -} -arangosh> a2 = db.example.replace(a1, { a : 2 }); -{ - "_id" : "example/110348", - "_key" : "110348", - "_rev" : "_YOn1fOe--_", - "_oldRev" : "_YOn1fOa--B" -} -arangosh> db.example.remove(a1); -[ArangoError 1200: precondition failed] -arangosh> db.example.remove(a1, true); -{ - "_id" : "example/110348", - "_key" : "110348", - "_rev" : "_YOn1fOe--_" -} -arangosh> db.example.document(a1); -[ArangoError 1202: document not found] diff --git a/Documentation/Examples/documentDocumentRemoveSimple.generated b/Documentation/Examples/documentDocumentRemoveSimple.generated deleted file mode 100644 index 07470bb663d8..000000000000 --- a/Documentation/Examples/documentDocumentRemoveSimple.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110369", - "_key" : "110369", - "_rev" : "_YOn1fPe--_" -} -arangosh> db.example.document(a1); -{ - "_key" : "110369", - "_id" : "example/110369", - "_rev" : "_YOn1fPe--_", - "a" : 1 -} -arangosh> db.example.remove(a1); -{ - "_id" : "example/110369", - "_key" : "110369", - "_rev" : "_YOn1fPe--_" -} -arangosh> db.example.document(a1); -[ArangoError 1202: document 
not found] diff --git a/Documentation/Examples/documentDocumentUpdate.generated b/Documentation/Examples/documentDocumentUpdate.generated deleted file mode 100644 index aba966e72f21..000000000000 --- a/Documentation/Examples/documentDocumentUpdate.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110388", - "_key" : "110388", - "_rev" : "_YOn1fQe--B" -} -arangosh> a2 = db._update(a1, { b : 2 }); -{ - "_id" : "example/110388", - "_key" : "110388", - "_rev" : "_YOn1fQi--_", - "_oldRev" : "_YOn1fQe--B" -} -arangosh> a3 = db._update(a1, { c : 3 }); -[ArangoError 1200: precondition failed] diff --git a/Documentation/Examples/documentsCollectionInsert.generated b/Documentation/Examples/documentsCollectionInsert.generated deleted file mode 100644 index 4d599620c35c..000000000000 --- a/Documentation/Examples/documentsCollectionInsert.generated +++ /dev/null @@ -1,12 +0,0 @@ -arangosh> db.example.insert({ Hello : "World" }); -{ - "_id" : "example/110406", - "_key" : "110406", - "_rev" : "_YOn1fRi--_" -} -arangosh> db.example.insert({ Hello : "World" }, true); -{ - "_id" : "example/110410", - "_key" : "110410", - "_rev" : "_YOn1fRi--B" -} diff --git a/Documentation/Examples/documentsCollectionInsertMulti.generated b/Documentation/Examples/documentsCollectionInsertMulti.generated deleted file mode 100644 index 7786f9075114..000000000000 --- a/Documentation/Examples/documentsCollectionInsertMulti.generated +++ /dev/null @@ -1,26 +0,0 @@ -arangosh> db.example.insert([{ Hello : "World" }, {Hello: "there"}]) -[ - { - "_id" : "example/110424", - "_key" : "110424", - "_rev" : "_YOn1fSi--B" - }, - { - "_id" : "example/110428", - "_key" : "110428", - "_rev" : "_YOn1fSi--D" - } -] -arangosh> db.example.insert([{ Hello : "World" }, {}], {waitForSync: true}); -[ - { - "_id" : "example/110432", - "_key" : "110432", - "_rev" : "_YOn1fSm--_" - }, - { - "_id" : "example/110436", - "_key" : "110436", - "_rev" : "_YOn1fSm--B" - } -] diff --git a/Documentation/Examples/documentsCollectionInsertSingle.generated b/Documentation/Examples/documentsCollectionInsertSingle.generated deleted file mode 100644 index 6982d7175050..000000000000 --- a/Documentation/Examples/documentsCollectionInsertSingle.generated +++ /dev/null @@ -1,12 +0,0 @@ -arangosh> db.example.insert({ Hello : "World" }); -{ - "_id" : "example/110451", - "_key" : "110451", - "_rev" : "_YOn1fUG--B" -} -arangosh> db.example.insert({ Hello : "World" }, {waitForSync: true}); -{ - "_id" : "example/110455", - "_key" : "110455", - "_rev" : "_YOn1fUK--_" -} diff --git a/Documentation/Examples/documentsCollectionInsertSingleOverwrite.generated b/Documentation/Examples/documentsCollectionInsertSingleOverwrite.generated deleted file mode 100644 index 4e94557808ab..000000000000 --- a/Documentation/Examples/documentsCollectionInsertSingleOverwrite.generated +++ /dev/null @@ -1,19 +0,0 @@ -arangosh> db.example.insert({ _key : "666", Hello : "World" }); -{ - "_id" : "example/666", - "_key" : "666", - "_rev" : "_YOn1fVO--B" -} -arangosh> db.example.insert({ _key : "666", Hello : "Universe" }, {overwrite: true, returnOld: true}); -{ - "_id" : "example/666", - "_key" : "666", - "_rev" : "_YOn1fVS--B", - "_oldRev" : "_YOn1fVO--B", - "old" : { - "_key" : "666", - "_id" : "example/666", - "_rev" : "_YOn1fVO--B", - "Hello" : "World" - } -} diff --git a/Documentation/Examples/documentsCollectionName.generated b/Documentation/Examples/documentsCollectionName.generated deleted file mode 100644 index 
b116e771ff90..000000000000 --- a/Documentation/Examples/documentsCollectionName.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db.example.document("example/2873916"); -{ - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "_YOn1fWS--_" -} diff --git a/Documentation/Examples/documentsCollectionNameHandle.generated b/Documentation/Examples/documentsCollectionNameHandle.generated deleted file mode 100644 index 370ca04b4ed8..000000000000 --- a/Documentation/Examples/documentsCollectionNameHandle.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.example.document(""); -[ArangoError 1205: illegal document handle] diff --git a/Documentation/Examples/documentsCollectionNameUnknown.generated b/Documentation/Examples/documentsCollectionNameUnknown.generated deleted file mode 100644 index 117d80ac5057..000000000000 --- a/Documentation/Examples/documentsCollectionNameUnknown.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db.example.document("example/4472917"); -[ArangoError 1202: document not found] diff --git a/Documentation/Examples/documentsCollectionNameValid.generated b/Documentation/Examples/documentsCollectionNameValid.generated deleted file mode 100644 index b96c8c7a3877..000000000000 --- a/Documentation/Examples/documentsCollectionNameValid.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db.example.document("example/2873916"); -{ - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "15624" -} diff --git a/Documentation/Examples/documentsCollectionNameValidByKey.generated b/Documentation/Examples/documentsCollectionNameValidByKey.generated deleted file mode 100644 index af7d8826b451..000000000000 --- a/Documentation/Examples/documentsCollectionNameValidByKey.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db.example.document("2873916"); -{ - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "_YOn1fZW--B" -} diff --git a/Documentation/Examples/documentsCollectionNameValidByObject.generated b/Documentation/Examples/documentsCollectionNameValidByObject.generated deleted file mode 100644 index 28a3aba1a0c3..000000000000 --- a/Documentation/Examples/documentsCollectionNameValidByObject.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db.example.document({_id: "example/2873916"}); -{ - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "_YOn1faa--_" -} diff --git a/Documentation/Examples/documentsCollectionNameValidMulti.generated b/Documentation/Examples/documentsCollectionNameValidMulti.generated deleted file mode 100644 index a42622c57a81..000000000000 --- a/Documentation/Examples/documentsCollectionNameValidMulti.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> db.example.document(["2873916","2873917"]); -[ - { - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "_YOn1fba--_" - }, - { - "_key" : "2873917", - "_id" : "example/2873917", - "_rev" : "_YOn1fba--B" - } -] diff --git a/Documentation/Examples/documentsCollectionNameValidPlain.generated b/Documentation/Examples/documentsCollectionNameValidPlain.generated deleted file mode 100644 index 2c41512cc348..000000000000 --- a/Documentation/Examples/documentsCollectionNameValidPlain.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db.example.document("example/2873916"); -{ - "_key" : "2873916", - "_id" : "example/2873916", - "_rev" : "_YOn1fce--_" -} diff --git a/Documentation/Examples/documentsCollectionRemoveConflict.generated b/Documentation/Examples/documentsCollectionRemoveConflict.generated deleted file mode 100644 index 91c4404edc86..000000000000 --- 
a/Documentation/Examples/documentsCollectionRemoveConflict.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110591", - "_key" : "110591", - "_rev" : "_YOn1fde--_" -} -arangosh> a2 = db._replace(a1, { a : 2 }); -{ - "_id" : "example/110591", - "_key" : "110591", - "_rev" : "_YOn1fde--B", - "_oldRev" : "_YOn1fde--_" -} -arangosh> db._remove(a1); -[ArangoError 1200: precondition failed] -arangosh> db._remove(a1, {overwrite: true} ); -{ - "_id" : "example/110591", - "_key" : "110591", - "_rev" : "_YOn1fde--B" -} -arangosh> db._document(a1); -[ArangoError 1202: document not found] diff --git a/Documentation/Examples/documentsCollectionRemoveSignature.generated b/Documentation/Examples/documentsCollectionRemoveSignature.generated deleted file mode 100644 index 141a88456b3c..000000000000 --- a/Documentation/Examples/documentsCollectionRemoveSignature.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> db.example.insert({ _key: "11265325374", a: 1 } ); -{ - "_id" : "example/11265325374", - "_key" : "11265325374", - "_rev" : "_YOn1fee--B" -} -arangosh> db.example.remove("example/11265325374", -........> { overwrite: true, waitForSync: false}) -{ - "_id" : "example/11265325374", - "_key" : "11265325374", - "_rev" : "_YOn1fee--B" -} diff --git a/Documentation/Examples/documentsCollectionRemoveSuccess.generated b/Documentation/Examples/documentsCollectionRemoveSuccess.generated deleted file mode 100644 index 1a0aff742fb0..000000000000 --- a/Documentation/Examples/documentsCollectionRemoveSuccess.generated +++ /dev/null @@ -1,16 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110628", - "_key" : "110628", - "_rev" : "_YOn1ffi--B" -} -arangosh> db._remove(a1); -{ - "_id" : "example/110628", - "_key" : "110628", - "_rev" : "_YOn1ffi--B" -} -arangosh> db._remove(a1); -[ArangoError 1202: document not found] -arangosh> db._remove(a1, {overwrite: true}); -[ArangoError 1202: document not found] diff --git a/Documentation/Examples/documentsCollectionReplace.generated b/Documentation/Examples/documentsCollectionReplace.generated deleted file mode 100644 index 19034d1223a5..000000000000 --- a/Documentation/Examples/documentsCollectionReplace.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110647", - "_key" : "110647", - "_rev" : "_YOn1fgm--_" -} -arangosh> a2 = db.example.replace(a1, { a : 2 }); -{ - "_id" : "example/110647", - "_key" : "110647", - "_rev" : "_YOn1fgm--B", - "_oldRev" : "_YOn1fgm--_" -} -arangosh> a3 = db.example.replace(a1, { a : 3 }); -[ArangoError 1200: precondition failed] diff --git a/Documentation/Examples/documentsCollectionReplace1.generated b/Documentation/Examples/documentsCollectionReplace1.generated deleted file mode 100644 index 35ca78c07506..000000000000 --- a/Documentation/Examples/documentsCollectionReplace1.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110665", - "_key" : "110665", - "_rev" : "_YOn1fhm--B" -} -arangosh> a2 = db.example.replace(a1, { a : 2 }); -{ - "_id" : "example/110665", - "_key" : "110665", - "_rev" : "_YOn1fhq--_", - "_oldRev" : "_YOn1fhm--B" -} -arangosh> a3 = db.example.replace(a1, { a : 3 }); -[ArangoError 1200: precondition failed] diff --git a/Documentation/Examples/documentsCollectionReplaceHandle.generated b/Documentation/Examples/documentsCollectionReplaceHandle.generated deleted file mode 100644 index 0556c0f3c30a..000000000000 --- 
a/Documentation/Examples/documentsCollectionReplaceHandle.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/3903045", - "_key" : "3903045", - "_rev" : "_YOn1fiq--D" -} -arangosh> a2 = db.example.replace("example/3903044", { a : 2 }); -{ - "_id" : "example/3903044", - "_key" : "3903044", - "_rev" : "_YOn1fiu--_", - "_oldRev" : "_YOn1fiq--B" -} diff --git a/Documentation/Examples/documentsCollectionUpdate.generated b/Documentation/Examples/documentsCollectionUpdate.generated deleted file mode 100644 index d52697c2ae58..000000000000 --- a/Documentation/Examples/documentsCollectionUpdate.generated +++ /dev/null @@ -1,51 +0,0 @@ -arangosh> a1 = db.example.insert({"a" : 1}); -{ - "_id" : "example/110702", - "_key" : "110702", - "_rev" : "_YOn1fju--_" -} -arangosh> a2 = db.example.update(a1, {"b" : 2, "c" : 3}); -{ - "_id" : "example/110702", - "_key" : "110702", - "_rev" : "_YOn1fju--B", - "_oldRev" : "_YOn1fju--_" -} -arangosh> a3 = db.example.update(a1, {"d" : 4}); -[ArangoError 1200: precondition failed] -arangosh> a4 = db.example.update(a2, {"e" : 5, "f" : 6 }); -{ - "_id" : "example/110702", - "_key" : "110702", - "_rev" : "_YOn1fjy--_", - "_oldRev" : "_YOn1fju--B" -} -arangosh> db.example.document(a4); -{ - "_key" : "110702", - "_id" : "example/110702", - "_rev" : "_YOn1fjy--_", - "a" : 1, - "c" : 3, - "b" : 2, - "f" : 6, - "e" : 5 -} -arangosh> a5 = db.example.update(a4, {"a" : 1, c : 9, e : 42 }); -{ - "_id" : "example/110702", - "_key" : "110702", - "_rev" : "_YOn1fjy--B", - "_oldRev" : "_YOn1fjy--_" -} -arangosh> db.example.document(a5); -{ - "_key" : "110702", - "_id" : "example/110702", - "_rev" : "_YOn1fjy--B", - "a" : 1, - "c" : 9, - "b" : 2, - "f" : 6, - "e" : 42 -} diff --git a/Documentation/Examples/documentsCollectionUpdateHandle.generated b/Documentation/Examples/documentsCollectionUpdateHandle.generated deleted file mode 100644 index d1c3d514b7f5..000000000000 --- a/Documentation/Examples/documentsCollectionUpdateHandle.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> a1 = db.example.insert({"a" : 1}); -{ - "_id" : "example/18612116", - "_key" : "18612116", - "_rev" : "_YOn1fku--D" -} -arangosh> a2 = db.example.update("example/18612115", { "x" : 1, "y" : 2 }); -{ - "_id" : "example/18612115", - "_key" : "18612115", - "_rev" : "_YOn1fky--_", - "_oldRev" : "_YOn1fku--B" -} diff --git a/Documentation/Examples/documentsCollectionUpdateHandleArray.generated b/Documentation/Examples/documentsCollectionUpdateHandleArray.generated deleted file mode 100644 index 1a607686aac2..000000000000 --- a/Documentation/Examples/documentsCollectionUpdateHandleArray.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db.example.insert({"a" : { "one" : 1, "two" : 2, "three" : 3 }, -........> "b" : { }}); -{ - "_id" : "example/20774804", - "_key" : "20774804", - "_rev" : "_YOn1flu--D" -} -arangosh> db.example.update("example/20774803", {"a" : { "four" : 4 }, -........> "b" : { "b1" : 1 }}); -{ - "_id" : "example/20774803", - "_key" : "20774803", - "_rev" : "_YOn1fly--_", - "_oldRev" : "_YOn1flu--B" -} -arangosh> db.example.document("example/20774803"); -{ - "_key" : "20774803", - "_id" : "example/20774803", - "_rev" : "_YOn1fly--_", - "b" : { - "b1" : 1 - }, - "a" : { - "four" : 4 - } -} -arangosh> db.example.update("example/20774803", { "a" : { "one" : null }, -........> "b" : null }, -........> false, false); -{ - "_id" : "example/20774803", - "_key" : "20774803", - "_rev" : "_YOn1fly--B", - "_oldRev" : "_YOn1fly--_" -} -arangosh> 
db.example.document("example/20774803"); -{ - "_key" : "20774803", - "_id" : "example/20774803", - "_rev" : "_YOn1fly--B", - "a" : { - "four" : 4 - } -} diff --git a/Documentation/Examples/documentsCollectionUpdateHandleKeepNull.generated b/Documentation/Examples/documentsCollectionUpdateHandleKeepNull.generated deleted file mode 100644 index b494b6b92a0e..000000000000 --- a/Documentation/Examples/documentsCollectionUpdateHandleKeepNull.generated +++ /dev/null @@ -1,53 +0,0 @@ -arangosh> db.example.insert({"a" : 1}); -{ - "_id" : "example/19988372", - "_key" : "19988372", - "_rev" : "_YOn1fmy--B" -} -arangosh> db.example.update("example/19988371", -........> { "b" : null, "c" : null, "d" : 3 }); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fm2--_", - "_oldRev" : "_YOn1fmy--_" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fm2--_", - "d" : 3, - "b" : null, - "c" : null -} -arangosh> db.example.update("example/19988371", { "a" : null }, false, false); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fm2--B", - "_oldRev" : "_YOn1fm2--_" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fm2--B", - "d" : 3, - "b" : null, - "c" : null -} -arangosh> db.example.update("example/19988371", -........> { "b" : null, "c": null, "d" : null }, false, false); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fm2--D", - "_oldRev" : "_YOn1fm2--B" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fm2--D" -} diff --git a/Documentation/Examples/documentsCollection_UpdateDocument.generated b/Documentation/Examples/documentsCollection_UpdateDocument.generated deleted file mode 100644 index 98d515ff62b4..000000000000 --- a/Documentation/Examples/documentsCollection_UpdateDocument.generated +++ /dev/null @@ -1,51 +0,0 @@ -arangosh> a1 = db.example.insert({"a" : 1}); -{ - "_id" : "example/110794", - "_key" : "110794", - "_rev" : "_YOn1fny--B" -} -arangosh> a2 = db.example.update(a1, {"b" : 2, "c" : 3}); -{ - "_id" : "example/110794", - "_key" : "110794", - "_rev" : "_YOn1fn2--_", - "_oldRev" : "_YOn1fny--B" -} -arangosh> a3 = db.example.update(a1, {"d" : 4}); -[ArangoError 1200: precondition failed] -arangosh> a4 = db.example.update(a2, {"e" : 5, "f" : 6 }); -{ - "_id" : "example/110794", - "_key" : "110794", - "_rev" : "_YOn1fn2--C", - "_oldRev" : "_YOn1fn2--_" -} -arangosh> db.example.document(a4); -{ - "_key" : "110794", - "_id" : "example/110794", - "_rev" : "_YOn1fn2--C", - "a" : 1, - "c" : 3, - "b" : 2, - "f" : 6, - "e" : 5 -} -arangosh> a5 = db.example.update(a4, {"a" : 1, c : 9, e : 42 }); -{ - "_id" : "example/110794", - "_key" : "110794", - "_rev" : "_YOn1fn6--_", - "_oldRev" : "_YOn1fn2--C" -} -arangosh> db.example.document(a5); -{ - "_key" : "110794", - "_id" : "example/110794", - "_rev" : "_YOn1fn6--_", - "a" : 1, - "c" : 9, - "b" : 2, - "f" : 6, - "e" : 42 -} diff --git a/Documentation/Examples/documentsCollection_UpdateHandleArray.generated b/Documentation/Examples/documentsCollection_UpdateHandleArray.generated deleted file mode 100644 index 20312d2a185a..000000000000 --- a/Documentation/Examples/documentsCollection_UpdateHandleArray.generated +++ /dev/null @@ -1,45 +0,0 @@ -arangosh> db.example.insert({"a" : { "one" : 1, "two" : 2, "three" : 3 }, -........> "b" : { }}); -{ - "_id" : 
"example/20774804", - "_key" : "20774804", - "_rev" : "_YOn1fo6--_" -} -arangosh> db.example.update("example/20774803", {"a" : { "four" : 4 }, -........> "b" : { "b1" : 1 }}); -{ - "_id" : "example/20774803", - "_key" : "20774803", - "_rev" : "_YOn1fo6--B", - "_oldRev" : "_YOn1fo2--B" -} -arangosh> db.example.document("example/20774803"); -{ - "_key" : "20774803", - "_id" : "example/20774803", - "_rev" : "_YOn1fo6--B", - "b" : { - "b1" : 1 - }, - "a" : { - "four" : 4 - } -} -arangosh> db.example.update("example/20774803", { "a" : { "one" : null }, -........> "b" : null }, -........> false, false); -{ - "_id" : "example/20774803", - "_key" : "20774803", - "_rev" : "_YOn1fo6--D", - "_oldRev" : "_YOn1fo6--B" -} -arangosh> db.example.document("example/20774803"); -{ - "_key" : "20774803", - "_id" : "example/20774803", - "_rev" : "_YOn1fo6--D", - "a" : { - "four" : 4 - } -} diff --git a/Documentation/Examples/documentsCollection_UpdateHandleKeepNull.generated b/Documentation/Examples/documentsCollection_UpdateHandleKeepNull.generated deleted file mode 100644 index 54ced0663b60..000000000000 --- a/Documentation/Examples/documentsCollection_UpdateHandleKeepNull.generated +++ /dev/null @@ -1,53 +0,0 @@ -arangosh> db.example.insert({"a" : 1}); -{ - "_id" : "example/19988372", - "_key" : "19988372", - "_rev" : "_YOn1fp6--_" -} -arangosh> db.example.update("example/19988371", -........> { "b" : null, "c" : null, "d" : 3 }); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fp6--B", - "_oldRev" : "_YOn1fp2--B" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fp6--B", - "d" : 3, - "b" : null, - "c" : null -} -arangosh> db.example.update("example/19988371", { "a" : null }, false, false); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fq---_", - "_oldRev" : "_YOn1fp6--B" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fq---_", - "d" : 3, - "b" : null, - "c" : null -} -arangosh> db.example.update("example/19988371", -........> { "b" : null, "c": null, "d" : null }, false, false); -{ - "_id" : "example/19988371", - "_key" : "19988371", - "_rev" : "_YOn1fq---B", - "_oldRev" : "_YOn1fq---_" -} -arangosh> db.example.document("example/19988371"); -{ - "_key" : "19988371", - "_id" : "example/19988371", - "_rev" : "_YOn1fq---B" -} diff --git a/Documentation/Examples/documentsCollection_UpdateHandleSingle.generated b/Documentation/Examples/documentsCollection_UpdateHandleSingle.generated deleted file mode 100644 index c5b1f5ce4ab9..000000000000 --- a/Documentation/Examples/documentsCollection_UpdateHandleSingle.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> a1 = db.example.insert({"a" : 1}); -{ - "_id" : "example/18612116", - "_key" : "18612116", - "_rev" : "_YOn1fq6--D" -} -arangosh> a2 = db.example.update("example/18612115", { "x" : 1, "y" : 2 }); -{ - "_id" : "example/18612115", - "_key" : "18612115", - "_rev" : "_YOn1fr---_", - "_oldRev" : "_YOn1fq6--B" -} diff --git a/Documentation/Examples/documentsDocumentName.generated b/Documentation/Examples/documentsDocumentName.generated deleted file mode 100644 index 0122ea6aab32..000000000000 --- a/Documentation/Examples/documentsDocumentName.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> db._document("example/12345"); -{ - "_key" : "12345", - "_id" : "example/12345", - "_rev" : "_YOn1fs---_" -} diff --git 
a/Documentation/Examples/documentsDocumentReplace.generated b/Documentation/Examples/documentsDocumentReplace.generated deleted file mode 100644 index d87cbd1501cd..000000000000 --- a/Documentation/Examples/documentsDocumentReplace.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> a1 = db.example.insert({ a : 1 }); -{ - "_id" : "example/110901", - "_key" : "110901", - "_rev" : "_YOn1ft---B" -} -arangosh> a2 = db._replace(a1, { a : 2 }); -{ - "_id" : "example/110901", - "_key" : "110901", - "_rev" : "_YOn1ft---D", - "_oldRev" : "_YOn1ft---B" -} -arangosh> a3 = db._replace(a1, { a : 3 }); -[ArangoError 1200: precondition failed] diff --git a/Documentation/Examples/dropIndex.generated b/Documentation/Examples/dropIndex.generated deleted file mode 100644 index cd270d9f5d96..000000000000 --- a/Documentation/Examples/dropIndex.generated +++ /dev/null @@ -1,56 +0,0 @@ -arangosh> db.example.ensureIndex({ type: "skiplist", fields: [ "a", "b" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/110919", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> var indexInfo = db.example.getIndexes(); -arangosh> indexInfo; -[ - { - "fields" : [ - "_key" - ], - "id" : "example/0", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - }, - { - "deduplicate" : true, - "fields" : [ - "a", - "b" - ], - "id" : "example/110919", - "sparse" : false, - "type" : "skiplist", - "unique" : false - } -] -arangosh> db._dropIndex(indexInfo[0]) -false -arangosh> db._dropIndex(indexInfo[1].id) -true -arangosh> indexInfo = db.example.getIndexes(); -[ - { - "fields" : [ - "_key" - ], - "id" : "example/0", - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "primary", - "unique" : true - } -] diff --git a/Documentation/Examples/ensureFulltextIndex.generated b/Documentation/Examples/ensureFulltextIndex.generated deleted file mode 100644 index 72f0536371a4..000000000000 --- a/Documentation/Examples/ensureFulltextIndex.generated +++ /dev/null @@ -1,95 +0,0 @@ -arangosh> db.example.ensureIndex({ type: "fulltext", fields: [ "text" ], minLength: 3 }); -{ - "fields" : [ - "text" - ], - "id" : "example/110938", - "isNewlyCreated" : true, - "minLength" : 3, - "sparse" : true, - "type" : "fulltext", - "unique" : false, - "code" : 201 -} -arangosh> db.example.save({ text : "the quick brown", b : { c : 1 } }); -{ - "_id" : "example/110941", - "_key" : "110941", - "_rev" : "_YOn1fxq--_" -} -arangosh> db.example.save({ text : "quick brown fox", b : { c : 2 } }); -{ - "_id" : "example/110945", - "_key" : "110945", - "_rev" : "_YOn1fxq--B" -} -arangosh> db.example.save({ text : "brown fox jums", b : { c : 3 } }); -{ - "_id" : "example/110948", - "_key" : "110948", - "_rev" : "_YOn1fxu--_" -} -arangosh> db.example.save({ text : "fox jumps over", b : { c : 4 } }); -{ - "_id" : "example/110951", - "_key" : "110951", - "_rev" : "_YOn1fxu--B" -} -arangosh> db.example.save({ text : "jumps over the", b : { c : 5 } }); -{ - "_id" : "example/110954", - "_key" : "110954", - "_rev" : "_YOn1fxu--D" -} -arangosh> db.example.save({ text : "over the lazy", b : { c : 6 } }); -{ - "_id" : "example/110957", - "_key" : "110957", - "_rev" : "_YOn1fxy--_" -} -arangosh> db.example.save({ text : "the lazy dog", b : { c : 7 } }); -{ - "_id" : "example/110960", - "_key" : "110960", - "_rev" : "_YOn1fxy--B" -} -arangosh> db._query("FOR document IN FULLTEXT(example, 'text', 'the') RETURN document"); -[ - { - "_key" : "110941", 
- "_id" : "example/110941", - "_rev" : "_YOn1fxq--_", - "text" : "the quick brown", - "b" : { - "c" : 1 - } - }, - { - "_key" : "110954", - "_id" : "example/110954", - "_rev" : "_YOn1fxu--D", - "text" : "jumps over the", - "b" : { - "c" : 5 - } - }, - { - "_key" : "110957", - "_id" : "example/110957", - "_rev" : "_YOn1fxy--_", - "text" : "over the lazy", - "b" : { - "c" : 6 - } - }, - { - "_key" : "110960", - "_id" : "example/110960", - "_rev" : "_YOn1fxy--B", - "text" : "the lazy dog", - "b" : { - "c" : 7 - } - } -] -[object ArangoQueryCursor, count: 4, cached: false, hasMore: false] diff --git a/Documentation/Examples/ensureHashIndex.generated b/Documentation/Examples/ensureHashIndex.generated deleted file mode 100644 index cf636007a1cb..000000000000 --- a/Documentation/Examples/ensureHashIndex.generated +++ /dev/null @@ -1,32 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "hash", fields: [ "a" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a" - ], - "id" : "test/110975", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false, - "code" : 201 -} -arangosh> db.test.save({ a : 1 }); -{ - "_id" : "test/110978", - "_key" : "110978", - "_rev" : "_YOn1fzu--_" -} -arangosh> db.test.save({ a : 1 }); -{ - "_id" : "test/110982", - "_key" : "110982", - "_rev" : "_YOn1fzu--B" -} -arangosh> db.test.save({ a : null }); -{ - "_id" : "test/110985", - "_key" : "110985", - "_rev" : "_YOn1fzy--_" -} diff --git a/Documentation/Examples/ensureHashIndexArray.generated b/Documentation/Examples/ensureHashIndexArray.generated deleted file mode 100644 index 14d72b95acfd..000000000000 --- a/Documentation/Examples/ensureHashIndexArray.generated +++ /dev/null @@ -1,32 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "hash", fields: [ "a[*]" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a[*]" - ], - "id" : "test/110999", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false, - "code" : 201 -} -arangosh> db.test.save({ a : [ 1, 2 ] }); -{ - "_id" : "test/111002", - "_key" : "111002", - "_rev" : "_YOn1f1y--_" -} -arangosh> db.test.save({ a : [ 1, 3 ] }); -{ - "_id" : "test/111006", - "_key" : "111006", - "_rev" : "_YOn1f12--_" -} -arangosh> db.test.save({ a : null }); -{ - "_id" : "test/111009", - "_key" : "111009", - "_rev" : "_YOn1f12--B" -} diff --git a/Documentation/Examples/ensurePersistent.generated b/Documentation/Examples/ensurePersistent.generated deleted file mode 100644 index 4aaa248f6490..000000000000 --- a/Documentation/Examples/ensurePersistent.generated +++ /dev/null @@ -1,43 +0,0 @@ -arangosh> db.names.ensureIndex({ type: "persistent", fields: [ "first" ] }); -{ - "deduplicate" : true, - "fields" : [ - "first" - ], - "id" : "names/111023", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "persistent", - "unique" : false, - "code" : 201 -} -arangosh> db.names.save({ "first" : "Tim" }); -{ - "_id" : "names/111026", - "_key" : "111026", - "_rev" : "_YOn1f36--_" -} -arangosh> db.names.save({ "first" : "Tom" }); -{ - "_id" : "names/111030", - "_key" : "111030", - "_rev" : "_YOn1f36--B" -} -arangosh> db.names.save({ "first" : "John" }); -{ - "_id" : "names/111033", - "_key" : "111033", - "_rev" : "_YOn1f36--D" -} -arangosh> db.names.save({ "first" : "Tim" }); -{ - "_id" : "names/111036", - "_key" : "111036", - "_rev" : "_YOn1f4---_" -} -arangosh> db.names.save({ "first" : "Tom" }); -{ - "_id" : "names/111039", - "_key" : "111039", - "_rev" : "_YOn1f4---B" -} diff --git 
a/Documentation/Examples/ensureSkiplist.generated b/Documentation/Examples/ensureSkiplist.generated deleted file mode 100644 index 4885ffc0e8af..000000000000 --- a/Documentation/Examples/ensureSkiplist.generated +++ /dev/null @@ -1,43 +0,0 @@ -arangosh> db.names.ensureIndex({ type: "skiplist", fields: [ "first" ] }); -{ - "deduplicate" : true, - "fields" : [ - "first" - ], - "id" : "names/111053", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> db.names.save({ "first" : "Tim" }); -{ - "_id" : "names/111056", - "_key" : "111056", - "_rev" : "_YOn1f6---_" -} -arangosh> db.names.save({ "first" : "Tom" }); -{ - "_id" : "names/111060", - "_key" : "111060", - "_rev" : "_YOn1f6---B" -} -arangosh> db.names.save({ "first" : "John" }); -{ - "_id" : "names/111063", - "_key" : "111063", - "_rev" : "_YOn1f6---D" -} -arangosh> db.names.save({ "first" : "Tim" }); -{ - "_id" : "names/111066", - "_key" : "111066", - "_rev" : "_YOn1f6C--_" -} -arangosh> db.names.save({ "first" : "Tom" }); -{ - "_id" : "names/111069", - "_key" : "111069", - "_rev" : "_YOn1f6C--B" -} diff --git a/Documentation/Examples/ensureSkiplistArray.generated b/Documentation/Examples/ensureSkiplistArray.generated deleted file mode 100644 index d5628ffcaaa7..000000000000 --- a/Documentation/Examples/ensureSkiplistArray.generated +++ /dev/null @@ -1,31 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "skiplist", fields: [ "a[*]" ] }); -{ - "deduplicate" : true, - "fields" : [ - "a[*]" - ], - "id" : "test/111083", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : false, - "code" : 201 -} -arangosh> db.test.save({ a : [ 1, 2 ] }); -{ - "_id" : "test/111086", - "_key" : "111086", - "_rev" : "_YOn1f8G--_" -} -arangosh> db.test.save({ a : [ 1, 3 ] }); -{ - "_id" : "test/111090", - "_key" : "111090", - "_rev" : "_YOn1f8G--B" -} -arangosh> db.test.save({ a : null }); -{ - "_id" : "test/111093", - "_key" : "111093", - "_rev" : "_YOn1f8G--D" -} diff --git a/Documentation/Examples/ensureTtlIndex.generated b/Documentation/Examples/ensureTtlIndex.generated deleted file mode 100644 index 0726a8b47dc9..000000000000 --- a/Documentation/Examples/ensureTtlIndex.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "ttl", fields: [ "creationDate" ], expireAfter: 600 }); -{ - "expireAfter" : 600, - "fields" : [ - "creationDate" - ], - "id" : "test/111107", - "isNewlyCreated" : true, - "sparse" : true, - "type" : "ttl", - "unique" : false, - "code" : 201 -} -arangosh> for (let i = 0; i < 100; ++i) { db.test.insert({ creationDate: Date.now() / 1000 }); } diff --git a/Documentation/Examples/ensureUniqueConstraint.generated b/Documentation/Examples/ensureUniqueConstraint.generated deleted file mode 100644 index b65ceff8c99a..000000000000 --- a/Documentation/Examples/ensureUniqueConstraint.generated +++ /dev/null @@ -1,31 +0,0 @@ -arangosh> db.test.ensureIndex({ type: "hash", fields: [ "a", "b.c" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "a", - "b.c" - ], - "id" : "test/111422", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : true, - "code" : 201 -} -arangosh> db.test.save({ a : 1, b : { c : 1 } }); -{ - "_id" : "test/111425", - "_key" : "111425", - "_rev" : "_YOn1gAy--_" -} -arangosh> db.test.save({ a : 1, b : { c : 1 } }); -[ArangoError 1210: - in index 111422 of type hash over 'a, b.c'; conflicting key: 111425] -arangosh> db.test.save({ a : 1, b : { c : 
null } }); -{ - "_id" : "test/111431", - "_key" : "111431", - "_rev" : "_YOn1gA2--B" -} -arangosh> db.test.save({ a : 1 }); -[ArangoError 1210: - in index 111422 of type hash over 'a, b.c'; conflicting key: 111431] diff --git a/Documentation/Examples/ensureUniquePersistentMultiColmun.generated b/Documentation/Examples/ensureUniquePersistentMultiColmun.generated deleted file mode 100644 index d084126d7b58..000000000000 --- a/Documentation/Examples/ensureUniquePersistentMultiColmun.generated +++ /dev/null @@ -1,34 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "persistent", fields: [ "name.first", "name.last" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "name.first", - "name.last" - ], - "id" : "ids/111447", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "persistent", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); -{ - "_id" : "ids/111450", - "_key" : "111450", - "_rev" : "_YOn1gC6--_" -} -arangosh> db.ids.save({ "name" : { "first" : "jens", "last": "jensen" }}); -{ - "_id" : "ids/111454", - "_key" : "111454", - "_rev" : "_YOn1gC6--B" -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "jensen" }}); -{ - "_id" : "ids/111457", - "_key" : "111457", - "_rev" : "_YOn1gC6--D" -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); -[ArangoError 1210: - in index 111447 of type persistent over 'name.first, name.last'; conflicting key: 111450] diff --git a/Documentation/Examples/ensureUniquePersistentSingle.generated b/Documentation/Examples/ensureUniquePersistentSingle.generated deleted file mode 100644 index 8b085afc8d60..000000000000 --- a/Documentation/Examples/ensureUniquePersistentSingle.generated +++ /dev/null @@ -1,33 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "persistent", fields: [ "myId" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "myId" - ], - "id" : "ids/111473", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "persistent", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "myId": 123 }); -{ - "_id" : "ids/111476", - "_key" : "111476", - "_rev" : "_YOn1gF---_" -} -arangosh> db.ids.save({ "myId": 456 }); -{ - "_id" : "ids/111480", - "_key" : "111480", - "_rev" : "_YOn1gFC--_" -} -arangosh> db.ids.save({ "myId": 789 }); -{ - "_id" : "ids/111483", - "_key" : "111483", - "_rev" : "_YOn1gFC--B" -} -arangosh> db.ids.save({ "myId": 123 }); -[ArangoError 1210: - in index 111473 of type persistent over 'myId'; conflicting key: 111476] diff --git a/Documentation/Examples/ensureUniqueSkiplist.generated b/Documentation/Examples/ensureUniqueSkiplist.generated deleted file mode 100644 index 4922ded81581..000000000000 --- a/Documentation/Examples/ensureUniqueSkiplist.generated +++ /dev/null @@ -1,33 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "skiplist", fields: [ "myId" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "myId" - ], - "id" : "ids/111499", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "myId": 123 }); -{ - "_id" : "ids/111502", - "_key" : "111502", - "_rev" : "_YOn1gHC--_" -} -arangosh> db.ids.save({ "myId": 456 }); -{ - "_id" : "ids/111506", - "_key" : "111506", - "_rev" : "_YOn1gHG--_" -} -arangosh> db.ids.save({ "myId": 789 }); -{ - "_id" : "ids/111509", - "_key" : "111509", - "_rev" : "_YOn1gHG--B" -} -arangosh> db.ids.save({ "myId": 123 }); -[ArangoError 1210: - in index 111499 of type skiplist over 'myId'; 
conflicting key: 111502] diff --git a/Documentation/Examples/ensureUniqueSkiplistMultiColmun.generated b/Documentation/Examples/ensureUniqueSkiplistMultiColmun.generated deleted file mode 100644 index 1b2e1ffd6587..000000000000 --- a/Documentation/Examples/ensureUniqueSkiplistMultiColmun.generated +++ /dev/null @@ -1,33 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "skiplist", fields: [ "name.first", "name.last" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "name.first", - "name.last" - ], - "id" : "ids/111525", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); -{ - "_id" : "ids/111528", - "_key" : "111528", - "_rev" : "_YOn1gJK--_" -} -arangosh> db.ids.save({ "name" : { "first" : "jens", "last": "jensen" }}); -{ - "_id" : "ids/111532", - "_key" : "111532", - "_rev" : "_YOn1gJK--B" -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "jensen" }}); -{ - "_id" : "ids/111535", - "_key" : "111535", - "_rev" : "_YOn1gJK--D" -} -[ArangoError 1210: - in index 111525 of type skiplist over 'name.first, name.last'; conflicting key: 111528] diff --git a/Documentation/Examples/ensureUniqueSkiplistMultiColumn.generated b/Documentation/Examples/ensureUniqueSkiplistMultiColumn.generated deleted file mode 100644 index 7527479e7d4c..000000000000 --- a/Documentation/Examples/ensureUniqueSkiplistMultiColumn.generated +++ /dev/null @@ -1,34 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "skiplist", fields: [ "name.first", "name.last" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "name.first", - "name.last" - ], - "id" : "ids/111551", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); -{ - "_id" : "ids/111554", - "_key" : "111554", - "_rev" : "_YOn1gLO--_" -} -arangosh> db.ids.save({ "name" : { "first" : "jens", "last": "jensen" }}); -{ - "_id" : "ids/111558", - "_key" : "111558", - "_rev" : "_YOn1gLO--B" -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "jensen" }}); -{ - "_id" : "ids/111561", - "_key" : "111561", - "_rev" : "_YOn1gLS--_" -} -arangosh> db.ids.save({ "name" : { "first" : "hans", "last": "hansen" }}); -[ArangoError 1210: - in index 111551 of type skiplist over 'name.first, name.last'; conflicting key: 111554] diff --git a/Documentation/Examples/ensureUniqueSkiplistSingle.generated b/Documentation/Examples/ensureUniqueSkiplistSingle.generated deleted file mode 100644 index 3a205eaae082..000000000000 --- a/Documentation/Examples/ensureUniqueSkiplistSingle.generated +++ /dev/null @@ -1,33 +0,0 @@ -arangosh> db.ids.ensureIndex({ type: "skiplist", fields: [ "myId" ], unique: true }); -{ - "deduplicate" : true, - "fields" : [ - "myId" - ], - "id" : "ids/111577", - "isNewlyCreated" : true, - "sparse" : false, - "type" : "skiplist", - "unique" : true, - "code" : 201 -} -arangosh> db.ids.save({ "myId": 123 }); -{ - "_id" : "ids/111580", - "_key" : "111580", - "_rev" : "_YOn1gNS--_" -} -arangosh> db.ids.save({ "myId": 456 }); -{ - "_id" : "ids/111584", - "_key" : "111584", - "_rev" : "_YOn1gNW--_" -} -arangosh> db.ids.save({ "myId": 789 }); -{ - "_id" : "ids/111587", - "_key" : "111587", - "_rev" : "_YOn1gNW--B" -} -arangosh> db.ids.save({ "myId": 123 }); -[ArangoError 1210: - in index 111577 of type skiplist over 'myId'; conflicting key: 111580] diff --git 
a/Documentation/Examples/ensureVertexCentricHashIndex.generated b/Documentation/Examples/ensureVertexCentricHashIndex.generated deleted file mode 100644 index 5e4dbf015d09..000000000000 --- a/Documentation/Examples/ensureVertexCentricHashIndex.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> db.collection.ensureIndex({ type: "hash", fields: [ "_from", "type" ] }) -{ - "deduplicate" : true, - "fields" : [ - "_from", - "type" - ], - "id" : "collection/111603", - "isNewlyCreated" : true, - "selectivityEstimate" : 1, - "sparse" : false, - "type" : "hash", - "unique" : false, - "code" : 201 -} diff --git a/Documentation/Examples/executeQuery.generated b/Documentation/Examples/executeQuery.generated deleted file mode 100644 index 9cc9973d383a..000000000000 --- a/Documentation/Examples/executeQuery.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> result = db.users.all().toArray(); -[ - { - "_key" : "111624", - "_id" : "users/111624", - "_rev" : "_YOn1gQe--D", - "name" : "Angela" - }, - { - "_key" : "111617", - "_id" : "users/111617", - "_rev" : "_YOn1gQe--_", - "name" : "Gerhard" - }, - { - "_key" : "111621", - "_id" : "users/111621", - "_rev" : "_YOn1gQe--B", - "name" : "Helmut" - } -] -arangosh> q = db.users.all(); q.execute(); result = [ ]; while (q.hasNext()) { result.push(q.next()); } -SimpleQueryAll(users) diff --git a/Documentation/Examples/executeQueryBatchSize.generated b/Documentation/Examples/executeQueryBatchSize.generated deleted file mode 100644 index cb3335a75140..000000000000 --- a/Documentation/Examples/executeQueryBatchSize.generated +++ /dev/null @@ -1,40 +0,0 @@ -arangosh> q = db.users.all(); q.setBatchSize(20); q.execute(); while (q.hasNext()) { print(q.next()); } -{ - "_key" : "111646", - "_id" : "users/111646", - "_rev" : "_YOn1gTC--_", - "name" : "Helmut" -} -{ - "_key" : "111649", - "_id" : "users/111649", - "_rev" : "_YOn1gTC--B", - "name" : "Angela" -} -{ - "_key" : "111642", - "_id" : "users/111642", - "_rev" : "_YOn1gT---B", - "name" : "Gerhard" -} -SimpleQueryAll(users) -arangosh> q = db.users.all(); q.execute(20); while (q.hasNext()) { print(q.next()); } -{ - "_key" : "111646", - "_id" : "users/111646", - "_rev" : "_YOn1gTC--_", - "name" : "Helmut" -} -{ - "_key" : "111649", - "_id" : "users/111649", - "_rev" : "_YOn1gTC--B", - "name" : "Angela" -} -{ - "_key" : "111642", - "_id" : "users/111642", - "_rev" : "_YOn1gT---B", - "name" : "Gerhard" -} -SimpleQueryAll(users) diff --git a/Documentation/Examples/executeQueryNoBatchSize.generated b/Documentation/Examples/executeQueryNoBatchSize.generated deleted file mode 100644 index 7a5582785d02..000000000000 --- a/Documentation/Examples/executeQueryNoBatchSize.generated +++ /dev/null @@ -1,26 +0,0 @@ -arangosh> result = db.users.all().toArray(); -[ - { - "_key" : "111667", - "_id" : "users/111667", - "_rev" : "_YOn1gUC--_", - "name" : "Gerhard" - }, - { - "_key" : "111671", - "_id" : "users/111671", - "_rev" : "_YOn1gUC--B", - "name" : "Helmut" - }, - { - "_key" : "111674", - "_id" : "users/111674", - "_rev" : "_YOn1gUC--D", - "name" : "Angela" - } -] -arangosh> var q = db._query("FOR x IN users RETURN x"); -........> result = [ ]; -........> while (q.hasNext()) { -........> result.push(q.next()); -........> } diff --git a/Documentation/Examples/generalGraphCreateGraph.generated b/Documentation/Examples/generalGraphCreateGraph.generated deleted file mode 100644 index ca1b6210f10a..000000000000 --- a/Documentation/Examples/generalGraphCreateGraph.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> var graph_module = 
require("@arangodb/general-graph"); -arangosh> graph = graph_module._create("myGraph"); -[ Graph myGraph EdgeDefinitions: [ ] VertexCollections: [ ] ] diff --git a/Documentation/Examples/generalGraphCreateGraph2.generated b/Documentation/Examples/generalGraphCreateGraph2.generated deleted file mode 100644 index 76e4fe1250e5..000000000000 --- a/Documentation/Examples/generalGraphCreateGraph2.generated +++ /dev/null @@ -1,9 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph = graph_module._create("myGraph", -........> [graph_module._relation("myRelation", ["male", "female"], ["male", "female"])], ["sessions"]); -{[Graph] - "myRelation" : [ArangoCollection 111701, "myRelation" (type edge, status loaded)], - "female" : [ArangoCollection 111691, "female" (type document, status loaded)], - "male" : [ArangoCollection 111696, "male" (type document, status loaded)], - "sessions" : [ArangoCollection 111686, "sessions" (type document, status loaded)] -} diff --git a/Documentation/Examples/generalGraphCreateGraphHowTo1.generated b/Documentation/Examples/generalGraphCreateGraphHowTo1.generated deleted file mode 100644 index ea5416fc61ae..000000000000 --- a/Documentation/Examples/generalGraphCreateGraphHowTo1.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> var graph = graph_module._create("myGraph"); -arangosh> graph; -{[Graph] -} diff --git a/Documentation/Examples/generalGraphCreateGraphHowTo2.generated b/Documentation/Examples/generalGraphCreateGraphHowTo2.generated deleted file mode 100644 index 6d3ee72566b8..000000000000 --- a/Documentation/Examples/generalGraphCreateGraphHowTo2.generated +++ /dev/null @@ -1,6 +0,0 @@ -arangosh> graph._addVertexCollection("shop"); -arangosh> graph._addVertexCollection("customer"); -arangosh> graph._addVertexCollection("pet"); -arangosh> graph; -{[Graph] -} diff --git a/Documentation/Examples/generalGraphCreateGraphHowTo3.generated b/Documentation/Examples/generalGraphCreateGraphHowTo3.generated deleted file mode 100644 index 98667d502025..000000000000 --- a/Documentation/Examples/generalGraphCreateGraphHowTo3.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var rel = graph_module._relation("isCustomer", ["shop"], ["customer"]); -arangosh> graph._extendEdgeDefinitions(rel); -arangosh> graph; -{[Graph] -} diff --git a/Documentation/Examples/generalGraphCreateGraphNoData.generated b/Documentation/Examples/generalGraphCreateGraphNoData.generated deleted file mode 100644 index 4e584abc7135..000000000000 --- a/Documentation/Examples/generalGraphCreateGraphNoData.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph = graph_module._create("myGraph"); -{[Graph] -} diff --git a/Documentation/Examples/generalGraphCreateGraphSingle.generated b/Documentation/Examples/generalGraphCreateGraphSingle.generated deleted file mode 100644 index f7eb94cf315a..000000000000 --- a/Documentation/Examples/generalGraphCreateGraphSingle.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> var edgeDefinitions = [ { collection: "edges", "from": [ "vertices" ], "to" : [ "vertices" ] } ]; -arangosh> graph = graph_module._create("myGraph", edgeDefinitions); -{[Graph] - "edges" : [ArangoCollection 111905, "edges" (type edge, status loaded)], - "vertices" : [ArangoCollection 111900, "vertices" (type document, status loaded)] -} diff --git 
a/Documentation/Examples/generalGraphDropGraphDropCollections.generated b/Documentation/Examples/generalGraphDropGraphDropCollections.generated deleted file mode 100644 index 8573a78605b0..000000000000 --- a/Documentation/Examples/generalGraphDropGraphDropCollections.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._drop("social", true); -arangosh> db._collection("female"); -null -arangosh> db._collection("male"); -null -arangosh> db._collection("relation"); -null diff --git a/Documentation/Examples/generalGraphDropGraphKeep.generated b/Documentation/Examples/generalGraphDropGraphKeep.generated deleted file mode 100644 index 104346aca747..000000000000 --- a/Documentation/Examples/generalGraphDropGraphKeep.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._drop("social"); -arangosh> db._collection("female"); -[ArangoCollection 112003, "female" (type document, status loaded)] -arangosh> db._collection("male"); -[ArangoCollection 112009, "male" (type document, status loaded)] -arangosh> db._collection("relation"); -[ArangoCollection 112015, "relation" (type edge, status loaded)] diff --git a/Documentation/Examples/generalGraphEdgeCollectionRemove.generated b/Documentation/Examples/generalGraphEdgeCollectionRemove.generated deleted file mode 100644 index b3ae76747fec..000000000000 --- a/Documentation/Examples/generalGraphEdgeCollectionRemove.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.relation.save("female/alice", "female/diana", {_key: "aliceAndDiana"}); -{ - "_id" : "relation/aliceAndDiana", - "_key" : "aliceAndDiana", - "_rev" : "_YOn1gqS--F" -} -arangosh> db._exists("relation/aliceAndDiana") -true -arangosh> graph.relation.remove("relation/aliceAndDiana") -true -arangosh> db._exists("relation/aliceAndDiana") -false diff --git a/Documentation/Examples/generalGraphEdgeCollectionReplace.generated b/Documentation/Examples/generalGraphEdgeCollectionReplace.generated deleted file mode 100644 index e9288de5697b..000000000000 --- a/Documentation/Examples/generalGraphEdgeCollectionReplace.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.relation.save("female/alice", "female/diana", {typo: "nose", _key: "aliceAndDiana"}); -{ - "_id" : "relation/aliceAndDiana", - "_key" : "aliceAndDiana", - "_rev" : "_YOn1gu6--B" -} -arangosh> graph.relation.replace("relation/aliceAndDiana", {type: "knows", _from: "female/alice", _to: "female/diana"}); -{ - "_id" : "relation/aliceAndDiana", - "_key" : "aliceAndDiana", - "_rev" : "_YOn1gv---_", - "_oldRev" : "_YOn1gu6--B" -} diff --git a/Documentation/Examples/generalGraphEdgeCollectionSave1.generated b/Documentation/Examples/generalGraphEdgeCollectionSave1.generated deleted file mode 100644 index 8c62a767c4e0..000000000000 --- a/Documentation/Examples/generalGraphEdgeCollectionSave1.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.relation.save("male/bob", "female/alice", {type: "married", _key: "bobAndAlice"}); -{ - "_id" : "relation/bobAndAlice", - "_key" : "bobAndAlice", - "_rev" : 
"_YOn1gx6--J" -} diff --git a/Documentation/Examples/generalGraphEdgeCollectionSave2.generated b/Documentation/Examples/generalGraphEdgeCollectionSave2.generated deleted file mode 100644 index 82d7d80007c6..000000000000 --- a/Documentation/Examples/generalGraphEdgeCollectionSave2.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.relation.save( -........> "relation/aliceAndBob", -........> "female/alice", -........> {type: "married", _key: "bobAndAlice"}); -[ArangoError 1906: invalid edge between relation/aliceAndBob and female/alice. Doesn't conform to any edge definition] diff --git a/Documentation/Examples/generalGraphEdgeCollectionUpdate.generated b/Documentation/Examples/generalGraphEdgeCollectionUpdate.generated deleted file mode 100644 index 27e93647c667..000000000000 --- a/Documentation/Examples/generalGraphEdgeCollectionUpdate.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.relation.save("female/alice", "female/diana", {type: "knows", _key: "aliceAndDiana"}); -{ - "_id" : "relation/aliceAndDiana", - "_key" : "aliceAndDiana", - "_rev" : "_YOn1g4S--_" -} -arangosh> graph.relation.update("relation/aliceAndDiana", {type: "quarreled", _key: "aliceAndDiana"}); -{ - "_id" : "relation/aliceAndDiana", - "_key" : "aliceAndDiana", - "_rev" : "_YOn1g4S--B", - "_oldRev" : "_YOn1g4S--_" -} diff --git a/Documentation/Examples/generalGraphEdgeDefinitions.generated b/Documentation/Examples/generalGraphEdgeDefinitions.generated deleted file mode 100644 index 3a77af8f1072..000000000000 --- a/Documentation/Examples/generalGraphEdgeDefinitions.generated +++ /dev/null @@ -1,42 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> directed_relation = graph_module._relation("lives_in", "user", "city"); -{ - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] -} -arangosh> undirected_relation = graph_module._relation("knows", "user", "user"); -{ - "collection" : "knows", - "from" : [ - "user" - ], - "to" : [ - "user" - ] -} -arangosh> edgedefinitions = graph_module._edgeDefinitions(directed_relation, undirected_relation); -[ - { - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] - }, - { - "collection" : "knows", - "from" : [ - "user" - ], - "to" : [ - "user" - ] - } -] diff --git a/Documentation/Examples/generalGraphEdgeDefinitionsExtend.generated b/Documentation/Examples/generalGraphEdgeDefinitionsExtend.generated deleted file mode 100644 index 6d39f83477d7..000000000000 --- a/Documentation/Examples/generalGraphEdgeDefinitionsExtend.generated +++ /dev/null @@ -1,34 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> directed_relation = graph_module._relation("lives_in", "user", "city"); -{ - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] -} -arangosh> undirected_relation = graph_module._relation("knows", "user", "user"); -{ - "collection" : "knows", - "from" : [ - "user" - ], - "to" : [ - "user" - ] -} -arangosh> edgedefinitions = graph_module._edgeDefinitions(directed_relation); -[ - { - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] - } -] -arangosh> edgedefinitions = graph_module._extendEdgeDefinitions(undirected_relation); diff --git 
a/Documentation/Examples/generalGraphEdgeDefinitionsSimple.generated b/Documentation/Examples/generalGraphEdgeDefinitionsSimple.generated deleted file mode 100644 index 3a77af8f1072..000000000000 --- a/Documentation/Examples/generalGraphEdgeDefinitionsSimple.generated +++ /dev/null @@ -1,42 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> directed_relation = graph_module._relation("lives_in", "user", "city"); -{ - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] -} -arangosh> undirected_relation = graph_module._relation("knows", "user", "user"); -{ - "collection" : "knows", - "from" : [ - "user" - ], - "to" : [ - "user" - ] -} -arangosh> edgedefinitions = graph_module._edgeDefinitions(directed_relation, undirected_relation); -[ - { - "collection" : "lives_in", - "from" : [ - "user" - ], - "to" : [ - "city" - ] - }, - { - "collection" : "knows", - "from" : [ - "user" - ], - "to" : [ - "user" - ] - } -] diff --git a/Documentation/Examples/generalGraphGetFromVertex.generated b/Documentation/Examples/generalGraphGetFromVertex.generated deleted file mode 100644 index 414426c58c36..000000000000 --- a/Documentation/Examples/generalGraphGetFromVertex.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> var any = require("@arangodb").db.relation.any(); -arangosh> graph._fromVertex("relation/" + any._key); -{ - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1g7K--_", - "name" : "Alice" -} diff --git a/Documentation/Examples/generalGraphGetToVertex.generated b/Documentation/Examples/generalGraphGetToVertex.generated deleted file mode 100644 index 463a0f711252..000000000000 --- a/Documentation/Examples/generalGraphGetToVertex.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> var any = require("@arangodb").db.relation.any(); -arangosh> graph._toVertex("relation/" + any._key); -{ - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1h-S--B", - "name" : "Bob" -} diff --git a/Documentation/Examples/generalGraphList.generated b/Documentation/Examples/generalGraphList.generated deleted file mode 100644 index f71ed6eb196a..000000000000 --- a/Documentation/Examples/generalGraphList.generated +++ /dev/null @@ -1,3 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._list(); -[ ] diff --git a/Documentation/Examples/generalGraphLoadGraph.generated b/Documentation/Examples/generalGraphLoadGraph.generated deleted file mode 100644 index 28419602cc9f..000000000000 --- a/Documentation/Examples/generalGraphLoadGraph.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph = graph_module._graph("social"); -{[Graph] - "relation" : [ArangoCollection 112618, "relation" (type edge, status loaded)], - "female" : [ArangoCollection 112606, "female" (type document, status loaded)], - "male" : [ArangoCollection 112612, "male" (type document, status loaded)] -} diff --git a/Documentation/Examples/generalGraphModuleAbsBetweenness1.generated b/Documentation/Examples/generalGraphModuleAbsBetweenness1.generated deleted file mode 100644 index 1fd2f83bc308..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsBetweenness1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> 
var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteBetweenness({}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 0, - "germanCity/Cologne" : 0, - "germanCity/Hamburg" : 0, - "germanCity/Berlin" : 0 -} diff --git a/Documentation/Examples/generalGraphModuleAbsBetweenness2.generated b/Documentation/Examples/generalGraphModuleAbsBetweenness2.generated deleted file mode 100644 index 3d0547c2d66f..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsBetweenness2.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteBetweenness({weight : 'distance'}); -{ -} diff --git a/Documentation/Examples/generalGraphModuleAbsBetweenness3.generated b/Documentation/Examples/generalGraphModuleAbsBetweenness3.generated deleted file mode 100644 index 960fd4cc3410..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsBetweenness3.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteBetweenness({direction : 'outbound', weight : 'distance'}); -{ -} diff --git a/Documentation/Examples/generalGraphModuleAbsCloseness1.generated b/Documentation/Examples/generalGraphModuleAbsCloseness1.generated deleted file mode 100644 index 80d51db4bfa9..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsCloseness1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteCloseness({}); -{ - "frenchCity/Lyon" : 4, - "frenchCity/Paris" : 4, - "germanCity/Cologne" : 4, - "germanCity/Hamburg" : 4, - "germanCity/Berlin" : 4 -} diff --git a/Documentation/Examples/generalGraphModuleAbsCloseness2.generated b/Documentation/Examples/generalGraphModuleAbsCloseness2.generated deleted file mode 100644 index f8c59e6bcd38..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsCloseness2.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteCloseness({}, {weight : 'distance'}); -{ - "frenchCity/Lyon" : 4, - "frenchCity/Paris" : 4, - "germanCity/Cologne" : 4, - "germanCity/Hamburg" : 4, - "germanCity/Berlin" : 4 -} diff --git a/Documentation/Examples/generalGraphModuleAbsCloseness3.generated b/Documentation/Examples/generalGraphModuleAbsCloseness3.generated deleted file mode 100644 index d1ce693521ab..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsCloseness3.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteCloseness({}, {startVertexCollectionRestriction : 'germanCity', -........> direction : 'outbound', weight : 'distance'}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 2, - "germanCity/Hamburg" : 3, - "germanCity/Berlin" : 4 -} diff --git a/Documentation/Examples/generalGraphModuleAbsEccentricity1.generated b/Documentation/Examples/generalGraphModuleAbsEccentricity1.generated deleted file mode 100644 index 
97fd06ba4f2b..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsEccentricity1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteEccentricity({}); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleAbsEccentricity2.generated b/Documentation/Examples/generalGraphModuleAbsEccentricity2.generated deleted file mode 100644 index 17883f18c902..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsEccentricity2.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteEccentricity({}, {weight : 'distance'}); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleAbsEccentricity3.generated b/Documentation/Examples/generalGraphModuleAbsEccentricity3.generated deleted file mode 100644 index d0a0e23dc8a5..000000000000 --- a/Documentation/Examples/generalGraphModuleAbsEccentricity3.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._absoluteEccentricity({}, {startVertexCollectionRestriction : 'germanCity', -........> direction : 'outbound', weight : 'distance'}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleAmountProperties1.generated b/Documentation/Examples/generalGraphModuleAmountProperties1.generated deleted file mode 100644 index 7e96528ae063..000000000000 --- a/Documentation/Examples/generalGraphModuleAmountProperties1.generated +++ /dev/null @@ -1,20 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._countCommonProperties({}, {}); -[ - { - "frenchCity/Lyon" : 2 - }, - { - "frenchCity/Paris" : 1 - }, - { - "germanCity/Berlin" : 1 - }, - { - "germanCity/Cologne" : 2 - }, - { - "germanCity/Hamburg" : 2 - } -] diff --git a/Documentation/Examples/generalGraphModuleAmountProperties2.generated b/Documentation/Examples/generalGraphModuleAmountProperties2.generated deleted file mode 100644 index f872474d898c..000000000000 --- a/Documentation/Examples/generalGraphModuleAmountProperties2.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._countCommonProperties({}, {}, {vertex1CollectionRestriction : 'germanCity', -........> vertex2CollectionRestriction : 'germanCity' ,ignoreProperties: 'population'}); -[ - { - "frenchCity/Lyon" : 2 - }, - { - "frenchCity/Paris" : 1 - }, - { - "germanCity/Berlin" : 1 - }, - { - "germanCity/Cologne" : 2 - }, - { - "germanCity/Hamburg" : 2 - } -] diff --git a/Documentation/Examples/generalGraphModuleBetweenness1.generated b/Documentation/Examples/generalGraphModuleBetweenness1.generated deleted file mode 100644 index 
947ea9271984..000000000000 --- a/Documentation/Examples/generalGraphModuleBetweenness1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._betweenness(); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 0, - "germanCity/Cologne" : 0, - "germanCity/Hamburg" : 0, - "germanCity/Berlin" : 0 -} diff --git a/Documentation/Examples/generalGraphModuleBetweenness2.generated b/Documentation/Examples/generalGraphModuleBetweenness2.generated deleted file mode 100644 index 13a442d574a6..000000000000 --- a/Documentation/Examples/generalGraphModuleBetweenness2.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._betweenness({weight : 'distance'}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 0, - "germanCity/Cologne" : 0, - "germanCity/Hamburg" : 0, - "germanCity/Berlin" : 0 -} diff --git a/Documentation/Examples/generalGraphModuleBetweenness3.generated b/Documentation/Examples/generalGraphModuleBetweenness3.generated deleted file mode 100644 index 60935cacd6f5..000000000000 --- a/Documentation/Examples/generalGraphModuleBetweenness3.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._betweenness({direction : 'outbound', weight : 'distance'}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 0, - "germanCity/Cologne" : 0, - "germanCity/Hamburg" : 0, - "germanCity/Berlin" : 0 -} diff --git a/Documentation/Examples/generalGraphModuleCloseness1.generated b/Documentation/Examples/generalGraphModuleCloseness1.generated deleted file mode 100644 index 5c54bf8705cd..000000000000 --- a/Documentation/Examples/generalGraphModuleCloseness1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._closeness(); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleCloseness2.generated b/Documentation/Examples/generalGraphModuleCloseness2.generated deleted file mode 100644 index c6e06acb332c..000000000000 --- a/Documentation/Examples/generalGraphModuleCloseness2.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._closeness({weight : 'distance'}); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleCloseness3.generated b/Documentation/Examples/generalGraphModuleCloseness3.generated deleted file mode 100644 index 6107d2b71676..000000000000 --- a/Documentation/Examples/generalGraphModuleCloseness3.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._closeness({direction : 'outbound', weight : 'distance'}); -{ - "frenchCity/Lyon" : 0, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 
0.5, - "germanCity/Hamburg" : 0.3333333333333333, - "germanCity/Berlin" : 0.25 -} diff --git a/Documentation/Examples/generalGraphModuleCommonNeighbors1.generated b/Documentation/Examples/generalGraphModuleCommonNeighbors1.generated deleted file mode 100644 index ff5b1acb76ef..000000000000 --- a/Documentation/Examples/generalGraphModuleCommonNeighbors1.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._commonNeighbors({isCapital : true}, {isCapital : true}); -[ - { - "left" : "frenchCity/Paris", - "right" : "germanCity/Berlin", - "neighbors" : [ - "germanCity/Hamburg", - "germanCity/Cologne", - "frenchCity/Lyon" - ] - }, - { - "left" : "germanCity/Berlin", - "right" : "frenchCity/Paris", - "neighbors" : [ - "frenchCity/Lyon", - "germanCity/Hamburg", - "germanCity/Cologne" - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleCommonNeighbors2.generated b/Documentation/Examples/generalGraphModuleCommonNeighbors2.generated deleted file mode 100644 index db7becb57365..000000000000 --- a/Documentation/Examples/generalGraphModuleCommonNeighbors2.generated +++ /dev/null @@ -1,33 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._commonNeighbors( -........> 'germanCity/Hamburg', -........> {}, -........> {direction : 'outbound', maxDepth : 2}, -........> {direction : 'outbound', maxDepth : 2}); -[ - { - "left" : "germanCity/Hamburg", - "right" : "frenchCity/Paris", - "neighbors" : [ - "frenchCity/Lyon" - ] - }, - { - "left" : "germanCity/Hamburg", - "right" : "germanCity/Cologne", - "neighbors" : [ - "frenchCity/Lyon", - "frenchCity/Paris" - ] - }, - { - "left" : "germanCity/Hamburg", - "right" : "germanCity/Berlin", - "neighbors" : [ - "frenchCity/Lyon", - "frenchCity/Paris", - "germanCity/Cologne" - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleCommonNeighborsAmount1.generated b/Documentation/Examples/generalGraphModuleCommonNeighborsAmount1.generated deleted file mode 100644 index 729d8a5cc423..000000000000 --- a/Documentation/Examples/generalGraphModuleCommonNeighborsAmount1.generated +++ /dev/null @@ -1,21 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> var example = { isCapital: true }; -arangosh> var options = { includeData: true }; -arangosh> graph._countCommonNeighbors(example, example, options, options); -[ - { - "frenchCity/Paris" : [ - { - "germanCity/Berlin" : 3 - } - ] - }, - { - "germanCity/Berlin" : [ - { - "frenchCity/Paris" : 3 - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleCommonNeighborsAmount2.generated b/Documentation/Examples/generalGraphModuleCommonNeighborsAmount2.generated deleted file mode 100644 index 5e3d05140079..000000000000 --- a/Documentation/Examples/generalGraphModuleCommonNeighborsAmount2.generated +++ /dev/null @@ -1,19 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> var options = { direction: 'outbound', maxDepth: 2, includeData: true }; -arangosh> graph._countCommonNeighbors('germanCity/Hamburg', {}, options, options); -[ - { - "germanCity/Hamburg" : [ - { - "frenchCity/Paris" : 1 - }, - { - "germanCity/Cologne" : 2 - }, - { - 
"germanCity/Berlin" : 3 - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleDiameter1.generated b/Documentation/Examples/generalGraphModuleDiameter1.generated deleted file mode 100644 index 4109c40be762..000000000000 --- a/Documentation/Examples/generalGraphModuleDiameter1.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._diameter(); -1 diff --git a/Documentation/Examples/generalGraphModuleDiameter2.generated b/Documentation/Examples/generalGraphModuleDiameter2.generated deleted file mode 100644 index 81d3e3f0e083..000000000000 --- a/Documentation/Examples/generalGraphModuleDiameter2.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._diameter({weight : 'distance'}); -1 diff --git a/Documentation/Examples/generalGraphModuleDiameter3.generated b/Documentation/Examples/generalGraphModuleDiameter3.generated deleted file mode 100644 index a614b0ba9378..000000000000 --- a/Documentation/Examples/generalGraphModuleDiameter3.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._diameter({direction : 'outbound', weight : 'distance'}); -1 diff --git a/Documentation/Examples/generalGraphModuleDistanceTo1.generated b/Documentation/Examples/generalGraphModuleDistanceTo1.generated deleted file mode 100644 index 008f9768df77..000000000000 --- a/Documentation/Examples/generalGraphModuleDistanceTo1.generated +++ /dev/null @@ -1,106 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); -arangosh> g._distanceTo({}, {}, {weight : 'distance', endVertexCollectionRestriction : 'frenchCity', -........> startVertexCollectionRestriction : 'germanCity'}); -[ - { - "startVertex" : "frenchCity/Lyon", - "vertex" : "frenchCity/Paris", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Lyon", - "vertex" : "germanCity/Cologne", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Lyon", - "vertex" : "germanCity/Hamburg", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Lyon", - "vertex" : "germanCity/Berlin", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Paris", - "vertex" : "frenchCity/Lyon", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Paris", - "vertex" : "germanCity/Cologne", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Paris", - "vertex" : "germanCity/Hamburg", - "distance" : 1 - }, - { - "startVertex" : "frenchCity/Paris", - "vertex" : "germanCity/Berlin", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Cologne", - "vertex" : "frenchCity/Lyon", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Cologne", - "vertex" : "frenchCity/Paris", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Cologne", - "vertex" : "germanCity/Hamburg", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Cologne", - "vertex" : "germanCity/Berlin", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Hamburg", - "vertex" : "frenchCity/Lyon", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Hamburg", - "vertex" : "frenchCity/Paris", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Hamburg", - "vertex" : "germanCity/Cologne", 
- "distance" : 1 - }, - { - "startVertex" : "germanCity/Hamburg", - "vertex" : "germanCity/Berlin", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Berlin", - "vertex" : "frenchCity/Lyon", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Berlin", - "vertex" : "frenchCity/Paris", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Berlin", - "vertex" : "germanCity/Cologne", - "distance" : 1 - }, - { - "startVertex" : "germanCity/Berlin", - "vertex" : "germanCity/Hamburg", - "distance" : 1 - } -] diff --git a/Documentation/Examples/generalGraphModuleDistanceTo2.generated b/Documentation/Examples/generalGraphModuleDistanceTo2.generated deleted file mode 100644 index f74b269d79a6..000000000000 --- a/Documentation/Examples/generalGraphModuleDistanceTo2.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); -arangosh> g._distanceTo([{_id: 'germanCity/Cologne'},{_id: 'germanCity/Munich'}], 'frenchCity/Lyon', -........> {weight : 'distance'}); -[ - { - "startVertex" : "germanCity/Cologne", - "vertex" : "frenchCity/Lyon", - "distance" : 1 - } -] diff --git a/Documentation/Examples/generalGraphModuleEccentricity2.generated b/Documentation/Examples/generalGraphModuleEccentricity2.generated deleted file mode 100644 index 03e4af117f16..000000000000 --- a/Documentation/Examples/generalGraphModuleEccentricity2.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._eccentricity(); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleEccentricity3.generated b/Documentation/Examples/generalGraphModuleEccentricity3.generated deleted file mode 100644 index 4542c7cb5cd5..000000000000 --- a/Documentation/Examples/generalGraphModuleEccentricity3.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._eccentricity({weight : 'distance'}); -{ - "frenchCity/Lyon" : 1, - "frenchCity/Paris" : 1, - "germanCity/Cologne" : 1, - "germanCity/Hamburg" : 1, - "germanCity/Berlin" : 1 -} diff --git a/Documentation/Examples/generalGraphModuleNeighbors1.generated b/Documentation/Examples/generalGraphModuleNeighbors1.generated deleted file mode 100644 index 5ca71a05adaf..000000000000 --- a/Documentation/Examples/generalGraphModuleNeighbors1.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._neighbors({isCapital : true}); -[ - "frenchCity/Lyon", - "germanCity/Berlin", - "germanCity/Cologne", - "germanCity/Hamburg", - "frenchCity/Paris" -] diff --git a/Documentation/Examples/generalGraphModuleNeighbors2.generated b/Documentation/Examples/generalGraphModuleNeighbors2.generated deleted file mode 100644 index bd9737e9301b..000000000000 --- a/Documentation/Examples/generalGraphModuleNeighbors2.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._neighbors('germanCity/Hamburg', {direction : 
'outbound', maxDepth : 2}); -[ - "germanCity/Cologne", - "frenchCity/Paris", - "frenchCity/Lyon" -] diff --git a/Documentation/Examples/generalGraphModulePaths.generated b/Documentation/Examples/generalGraphModulePaths.generated deleted file mode 100644 index 4a0f7f01f697..000000000000 --- a/Documentation/Examples/generalGraphModulePaths.generated +++ /dev/null @@ -1,355 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("social"); -arangosh> g._paths(); -[ - [ - { - "vertices" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - ], - "edges" : [ ], - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - }, - { - "vertices" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - } - ], - "edges" : [ ], - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - } - }, - { - "vertices" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - } - ], - "edges" : [ - { - "_key" : "aliceAndBob", - "_id" : "relation/aliceAndBob", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "24352", - "type" : "married" - } - ], - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - "destination" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - } - }, - { - "vertices" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - ], - "edges" : [ - { - "_key" : "aliceAndBob", - "_id" : "relation/aliceAndBob", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "24352", - "type" : "married" - }, - { - "_key" : "bobAndDiana", - "_id" : "relation/bobAndDiana", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "24367", - "type" : "friend" - } - ], - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - }, - { - "vertices" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - } - ], - "edges" : [ - { - "_key" : "aliceAndCharly", - "_id" : "relation/aliceAndCharly", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "24360", - "type" : "friend" - } - ], - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - "destination" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - } - }, - { - "vertices" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - }, - { - "_key" : "diana", 
- "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - ], - "edges" : [ - { - "_key" : "aliceAndCharly", - "_id" : "relation/aliceAndCharly", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "24360", - "type" : "friend" - }, - { - "_key" : "charlyAndDiana", - "_id" : "relation/charlyAndDiana", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "24364", - "type" : "married" - } - ], - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "24332", - "name" : "Alice" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - }, - { - "vertices" : [ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - } - ], - "edges" : [ ], - "source" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - }, - "destination" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - } - }, - { - "vertices" : [ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - ], - "edges" : [ - { - "_key" : "bobAndDiana", - "_id" : "relation/bobAndDiana", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "24367", - "type" : "friend" - } - ], - "source" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "24337", - "name" : "Bob" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - }, - { - "vertices" : [ - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - } - ], - "edges" : [ ], - "source" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - }, - "destination" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - } - }, - { - "vertices" : [ - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - ], - "edges" : [ - { - "_key" : "charlyAndDiana", - "_id" : "relation/charlyAndDiana", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "24364", - "type" : "married" - } - ], - "source" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "24341", - "name" : "Charly" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "24344", - "name" : "Diana" - } - } - ] -] diff --git a/Documentation/Examples/generalGraphModulePaths1.generated b/Documentation/Examples/generalGraphModulePaths1.generated deleted file mode 100644 index d055ba6f7bcd..000000000000 --- a/Documentation/Examples/generalGraphModulePaths1.generated +++ /dev/null @@ -1,361 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("social"); -arangosh> g._paths(); -[ - { - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "edges" : [ ], - "vertice" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - } - ] - }, - { - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - 
"_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "edges" : [ ], - "vertice" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - } - ] - }, - { - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "destination" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - "edges" : [ - { - "_key" : "116262", - "_id" : "relation/116262", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "_YOn1kpa--H", - "type" : "married", - "vertex" : "alice" - } - ], - "vertice" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - } - ] - }, - { - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "edges" : [ - { - "_key" : "116262", - "_id" : "relation/116262", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "_YOn1kpa--H", - "type" : "married", - "vertex" : "alice" - }, - { - "_key" : "116272", - "_id" : "relation/116272", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "_YOn1kpe--D", - "type" : "friend", - "vertex" : "bob" - } - ], - "vertice" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - } - ] - }, - { - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "destination" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - "edges" : [ - { - "_key" : "116266", - "_id" : "relation/116266", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1kpe--_", - "type" : "friend", - "vertex" : "alice" - } - ], - "vertice" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - } - ] - }, - { - "source" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "edges" : [ - { - "_key" : "116266", - "_id" : "relation/116266", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1kpe--_", - "type" : "friend", - "vertex" : "alice" - }, - { - "_key" : "116269", - "_id" : "relation/116269", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "_YOn1kpe--B", - "type" : "married", - "vertex" : "charly" - } - ], - "vertice" : [ - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kpa--_", - "name" : "Alice" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - } - ] - }, - { - "source" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - "destination" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - 
"edges" : [ ], - "vertice" : [ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - } - ] - }, - { - "source" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "edges" : [ - { - "_key" : "116272", - "_id" : "relation/116272", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "_YOn1kpe--D", - "type" : "friend", - "vertex" : "bob" - } - ], - "vertice" : [ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1kpa--B", - "name" : "Bob" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - } - ] - }, - { - "source" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - "destination" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - "edges" : [ ], - "vertice" : [ - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - } - ] - }, - { - "source" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - "destination" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - }, - "edges" : [ - { - "_key" : "116269", - "_id" : "relation/116269", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "_YOn1kpe--B", - "type" : "married", - "vertex" : "charly" - } - ], - "vertice" : [ - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1kpa--D", - "name" : "Charly" - }, - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1kpa--F", - "name" : "Diana" - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModulePaths2.generated b/Documentation/Examples/generalGraphModulePaths2.generated deleted file mode 100644 index 281ba5844326..000000000000 --- a/Documentation/Examples/generalGraphModulePaths2.generated +++ /dev/null @@ -1,269 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("social"); -arangosh> g._paths({direction : 'inbound', minLength : 1, maxLength : 2}); -[ - { - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - "destination" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1ktC--B", - "name" : "Charly" - }, - "edges" : [ - { - "_key" : "116343", - "_id" : "relation/116343", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "_YOn1ktG--D", - "type" : "married", - "vertex" : "charly" - } - ], - "vertice" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1ktC--B", - "name" : "Charly" - } - ] - }, - { - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - }, - "edges" : [ - { - "_key" : "116343", - "_id" : "relation/116343", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "_YOn1ktG--D", - "type" : "married", - "vertex" : "charly" - }, - { - "_key" : "116340", - "_id" : "relation/116340", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1ktG--B", - "type" : "friend", - "vertex" : "alice" - } - 
], - "vertice" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1ktC--B", - "name" : "Charly" - }, - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - } - ] - }, - { - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - "destination" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1ktC--_", - "name" : "Bob" - }, - "edges" : [ - { - "_key" : "116346", - "_id" : "relation/116346", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "_YOn1ktG--F", - "type" : "friend", - "vertex" : "bob" - } - ], - "vertice" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1ktC--_", - "name" : "Bob" - } - ] - }, - { - "source" : { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - }, - "edges" : [ - { - "_key" : "116346", - "_id" : "relation/116346", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "_YOn1ktG--F", - "type" : "friend", - "vertex" : "bob" - }, - { - "_key" : "116336", - "_id" : "relation/116336", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "_YOn1ktG--_", - "type" : "married", - "vertex" : "alice" - } - ], - "vertice" : [ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1ktC--D", - "name" : "Diana" - }, - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1ktC--_", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - } - ] - }, - { - "source" : { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1ktC--_", - "name" : "Bob" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - }, - "edges" : [ - { - "_key" : "116336", - "_id" : "relation/116336", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "_YOn1ktG--_", - "type" : "married", - "vertex" : "alice" - } - ], - "vertice" : [ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1ktC--_", - "name" : "Bob" - }, - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - } - ] - }, - { - "source" : { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1ktC--B", - "name" : "Charly" - }, - "destination" : { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - }, - "edges" : [ - { - "_key" : "116340", - "_id" : "relation/116340", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1ktG--B", - "type" : "friend", - "vertex" : "alice" - } - ], - "vertice" : [ - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1ktC--B", - "name" : "Charly" - }, - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1kt---_", - "name" : "Alice" - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleProperties1.generated b/Documentation/Examples/generalGraphModuleProperties1.generated deleted file mode 100644 index a9872777fca8..000000000000 --- a/Documentation/Examples/generalGraphModuleProperties1.generated +++ /dev/null @@ -1,59 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); 
-arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._commonProperties({}, {}); -[ - { - "frenchCity/Lyon" : [ - { - "_id" : "germanCity/Cologne", - "isCapital" : false - }, - { - "_id" : "germanCity/Hamburg", - "isCapital" : false - } - ] - }, - { - "frenchCity/Paris" : [ - { - "_id" : "germanCity/Berlin", - "isCapital" : true - } - ] - }, - { - "germanCity/Berlin" : [ - { - "_id" : "frenchCity/Paris", - "isCapital" : true - } - ] - }, - { - "germanCity/Cologne" : [ - { - "_id" : "frenchCity/Lyon", - "isCapital" : false - }, - { - "_id" : "germanCity/Hamburg", - "isCapital" : false, - "population" : 1000000 - } - ] - }, - { - "germanCity/Hamburg" : [ - { - "_id" : "frenchCity/Lyon", - "isCapital" : false - }, - { - "_id" : "germanCity/Cologne", - "isCapital" : false, - "population" : 1000000 - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleProperties2.generated b/Documentation/Examples/generalGraphModuleProperties2.generated deleted file mode 100644 index 9abdd74cb669..000000000000 --- a/Documentation/Examples/generalGraphModuleProperties2.generated +++ /dev/null @@ -1,57 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._commonProperties({}, {}, {ignoreProperties: 'population'}); -[ - { - "frenchCity/Lyon" : [ - { - "_id" : "germanCity/Cologne", - "isCapital" : false - }, - { - "_id" : "germanCity/Hamburg", - "isCapital" : false - } - ] - }, - { - "frenchCity/Paris" : [ - { - "_id" : "germanCity/Berlin", - "isCapital" : true - } - ] - }, - { - "germanCity/Berlin" : [ - { - "_id" : "frenchCity/Paris", - "isCapital" : true - } - ] - }, - { - "germanCity/Cologne" : [ - { - "_id" : "frenchCity/Lyon", - "isCapital" : false - }, - { - "_id" : "germanCity/Hamburg", - "isCapital" : false - } - ] - }, - { - "germanCity/Hamburg" : [ - { - "_id" : "frenchCity/Lyon", - "isCapital" : false - }, - { - "_id" : "germanCity/Cologne", - "isCapital" : false - } - ] - } -] diff --git a/Documentation/Examples/generalGraphModuleRadius1.generated b/Documentation/Examples/generalGraphModuleRadius1.generated deleted file mode 100644 index 11001e74d829..000000000000 --- a/Documentation/Examples/generalGraphModuleRadius1.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._radius(); -1 diff --git a/Documentation/Examples/generalGraphModuleRadius2.generated b/Documentation/Examples/generalGraphModuleRadius2.generated deleted file mode 100644 index eb319fcbefa6..000000000000 --- a/Documentation/Examples/generalGraphModuleRadius2.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._radius({weight : 'distance'}); -1 diff --git a/Documentation/Examples/generalGraphModuleRadius3.generated b/Documentation/Examples/generalGraphModuleRadius3.generated deleted file mode 100644 index c409a68ca1ed..000000000000 --- a/Documentation/Examples/generalGraphModuleRadius3.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("routeplanner"); -arangosh> graph._radius({direction : 'outbound', weight : 'distance'}); -1 diff --git 
a/Documentation/Examples/generalGraphModuleShortestPaths1.generated b/Documentation/Examples/generalGraphModuleShortestPaths1.generated deleted file mode 100644 index 4ddcf5b3f3ff..000000000000 --- a/Documentation/Examples/generalGraphModuleShortestPaths1.generated +++ /dev/null @@ -1,346 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); -arangosh> g._shortestPath({}, {}, {weight : 'distance', endVertexCollectionRestriction : 'frenchCity', -........> startVertexCollectionRestriction : 'germanCity'}); -[ - { - "vertices" : [ - "frenchCity/Lyon", - "frenchCity/Paris" - ], - "edges" : [ - { - "_key" : "117022", - "_id" : "frenchHighway/117022", - "_from" : "frenchCity/Paris", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYC--D", - "distance" : 550 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Lyon", - "germanCity/Cologne" - ], - "edges" : [ - { - "_key" : "117039", - "_id" : "internationalHighway/117039", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYG--D", - "distance" : 700 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Lyon", - "germanCity/Hamburg" - ], - "edges" : [ - { - "_key" : "117036", - "_id" : "internationalHighway/117036", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYG--B", - "distance" : 1300 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Lyon", - "germanCity/Berlin" - ], - "edges" : [ - { - "_key" : "117026", - "_id" : "internationalHighway/117026", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYC--F", - "distance" : 1100 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Paris", - "frenchCity/Lyon" - ], - "edges" : [ - { - "_key" : "117022", - "_id" : "frenchHighway/117022", - "_from" : "frenchCity/Paris", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYC--D", - "distance" : 550 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Paris", - "germanCity/Cologne" - ], - "edges" : [ - { - "_key" : "117042", - "_id" : "internationalHighway/117042", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYG--F", - "distance" : 550 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Paris", - "germanCity/Hamburg" - ], - "edges" : [ - { - "_key" : "117033", - "_id" : "internationalHighway/117033", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYG--_", - "distance" : 900 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "frenchCity/Paris", - "germanCity/Berlin" - ], - "edges" : [ - { - "_key" : "117030", - "_id" : "internationalHighway/117030", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYC--H", - "distance" : 1200 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Cologne", - "frenchCity/Lyon" - ], - "edges" : [ - { - "_key" : "117039", - "_id" : "internationalHighway/117039", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYG--D", - "distance" : 700 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Cologne", - "frenchCity/Paris" - ], - "edges" : [ - { - "_key" : "117042", - "_id" : "internationalHighway/117042", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYG--F", - "distance" : 550 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Cologne", - "germanCity/Hamburg" - ], - 
"edges" : [ - { - "_key" : "117019", - "_id" : "germanHighway/117019", - "_from" : "germanCity/Hamburg", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1lYC--B", - "distance" : 500 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Cologne", - "germanCity/Berlin" - ], - "edges" : [ - { - "_key" : "117012", - "_id" : "germanHighway/117012", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1lY---_", - "distance" : 850 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Hamburg", - "frenchCity/Lyon" - ], - "edges" : [ - { - "_key" : "117036", - "_id" : "internationalHighway/117036", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYG--B", - "distance" : 1300 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Hamburg", - "frenchCity/Paris" - ], - "edges" : [ - { - "_key" : "117033", - "_id" : "internationalHighway/117033", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYG--_", - "distance" : 900 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Hamburg", - "germanCity/Cologne" - ], - "edges" : [ - { - "_key" : "117019", - "_id" : "germanHighway/117019", - "_from" : "germanCity/Hamburg", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1lYC--B", - "distance" : 500 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Hamburg", - "germanCity/Berlin" - ], - "edges" : [ - { - "_key" : "117016", - "_id" : "germanHighway/117016", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Hamburg", - "_rev" : "_YOn1lYC--_", - "distance" : 400 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Berlin", - "frenchCity/Lyon" - ], - "edges" : [ - { - "_key" : "117026", - "_id" : "internationalHighway/117026", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lYC--F", - "distance" : 1100 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Berlin", - "frenchCity/Paris" - ], - "edges" : [ - { - "_key" : "117030", - "_id" : "internationalHighway/117030", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1lYC--H", - "distance" : 1200 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Berlin", - "germanCity/Cologne" - ], - "edges" : [ - { - "_key" : "117012", - "_id" : "germanHighway/117012", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1lY---_", - "distance" : 850 - } - ], - "distance" : 1 - }, - { - "vertices" : [ - "germanCity/Berlin", - "germanCity/Hamburg" - ], - "edges" : [ - { - "_key" : "117016", - "_id" : "germanHighway/117016", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Hamburg", - "_rev" : "_YOn1lYC--_", - "distance" : 400 - } - ], - "distance" : 1 - } -] diff --git a/Documentation/Examples/generalGraphModuleShortestPaths2.generated b/Documentation/Examples/generalGraphModuleShortestPaths2.generated deleted file mode 100644 index 8ed14b317ac3..000000000000 --- a/Documentation/Examples/generalGraphModuleShortestPaths2.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); -arangosh> g._shortestPath([{_id: 'germanCity/Cologne'},{_id: 'germanCity/Munich'}], 'frenchCity/Lyon', -........> {weight : 'distance'}); -[ - { - "vertices" : [ - "germanCity/Cologne", - "frenchCity/Lyon" - ], - "edges" : [ - { - "_key" : "117157", - "_id" : "internationalHighway/117157", - "_from" : 
"germanCity/Cologne", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1lfa--B", - "distance" : 700 - } - ], - "distance" : 1 - } -] diff --git a/Documentation/Examples/generalGraphRelationDefinition.generated b/Documentation/Examples/generalGraphRelationDefinition.generated deleted file mode 100644 index 990caf766b83..000000000000 --- a/Documentation/Examples/generalGraphRelationDefinition.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._relation("has_bought", ["Customer", "Company"], ["Groceries", "Electronics"]); -{ - "collection" : "has_bought", - "from" : [ - "Customer", - "Company" - ], - "to" : [ - "Groceries", - "Electronics" - ] -} diff --git a/Documentation/Examples/generalGraphRelationDefinitionSave.generated b/Documentation/Examples/generalGraphRelationDefinitionSave.generated deleted file mode 100644 index 990caf766b83..000000000000 --- a/Documentation/Examples/generalGraphRelationDefinitionSave.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._relation("has_bought", ["Customer", "Company"], ["Groceries", "Electronics"]); -{ - "collection" : "has_bought", - "from" : [ - "Customer", - "Company" - ], - "to" : [ - "Groceries", - "Electronics" - ] -} diff --git a/Documentation/Examples/generalGraphRelationDefinitionSingle.generated b/Documentation/Examples/generalGraphRelationDefinitionSingle.generated deleted file mode 100644 index 0fe2fcbba7f9..000000000000 --- a/Documentation/Examples/generalGraphRelationDefinitionSingle.generated +++ /dev/null @@ -1,11 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> graph_module._relation("has_bought", "Customer", "Product"); -{ - "collection" : "has_bought", - "from" : [ - "Customer" - ], - "to" : [ - "Product" - ] -} diff --git a/Documentation/Examples/generalGraphVertexCollectionRemove.generated b/Documentation/Examples/generalGraphVertexCollectionRemove.generated deleted file mode 100644 index e8a4817425cb..000000000000 --- a/Documentation/Examples/generalGraphVertexCollectionRemove.generated +++ /dev/null @@ -1,14 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.male.save({name: "Kermit", _key: "kermit"}); -{ - "_id" : "male/kermit", - "_key" : "kermit", - "_rev" : "_YOn1ljq--B" -} -arangosh> db._exists("male/kermit") -true -arangosh> graph.male.remove("male/kermit") -true -arangosh> db._exists("male/kermit") -false diff --git a/Documentation/Examples/generalGraphVertexCollectionReplace.generated b/Documentation/Examples/generalGraphVertexCollectionReplace.generated deleted file mode 100644 index 5a73c5918252..000000000000 --- a/Documentation/Examples/generalGraphVertexCollectionReplace.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.male.save({neym: "Jon", _key: "john"}); -{ - "_id" : "male/john", - "_key" : "john", - "_rev" : "_YOn1lmu--_" -} -arangosh> graph.male.replace("male/john", {name: "John"}); -{ - "_id" : "male/john", - "_key" : "john", - "_rev" : "_YOn1lmu--B", - "_oldRev" : "_YOn1lmu--_" -} diff --git a/Documentation/Examples/generalGraphVertexCollectionSave.generated b/Documentation/Examples/generalGraphVertexCollectionSave.generated deleted file mode 100644 index 
d59d423a808b..000000000000 --- a/Documentation/Examples/generalGraphVertexCollectionSave.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.male.save({name: "Floyd", _key: "floyd"}); -{ - "_id" : "male/floyd", - "_key" : "floyd", - "_rev" : "_YOn1lp2--_" -} diff --git a/Documentation/Examples/generalGraphVertexCollectionUpdate.generated b/Documentation/Examples/generalGraphVertexCollectionUpdate.generated deleted file mode 100644 index bfb359ccd498..000000000000 --- a/Documentation/Examples/generalGraphVertexCollectionUpdate.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> graph.female.save({name: "Lynda", _key: "linda"}); -{ - "_id" : "female/linda", - "_key" : "linda", - "_rev" : "_YOn1ls6--H" -} -arangosh> graph.female.update("female/linda", {name: "Linda", _key: "linda"}); -{ - "_id" : "female/linda", - "_key" : "linda", - "_rev" : "_YOn1ls6--J", - "_oldRev" : "_YOn1ls6--H" -} diff --git a/Documentation/Examples/general_graph__addVertexCollection.generated b/Documentation/Examples/general_graph__addVertexCollection.generated deleted file mode 100644 index a33f0139c423..000000000000 --- a/Documentation/Examples/general_graph__addVertexCollection.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var graph = graph_module._create("myGraph", [ed1]); -arangosh> graph._addVertexCollection("myVC3", true); diff --git a/Documentation/Examples/general_graph__deleteEdgeDefinition.generated b/Documentation/Examples/general_graph__deleteEdgeDefinition.generated deleted file mode 100644 index a6d510fa059c..000000000000 --- a/Documentation/Examples/general_graph__deleteEdgeDefinition.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); -arangosh> var graph = graph_module._create("myGraph", [ed1, ed2]); -arangosh> graph._deleteEdgeDefinition("myEC1"); -arangosh> db._collection("myEC1"); -[ArangoCollection 27040, "myEC1" (type edge, status loaded)] diff --git a/Documentation/Examples/general_graph__deleteEdgeDefinitionNoDrop.generated b/Documentation/Examples/general_graph__deleteEdgeDefinitionNoDrop.generated deleted file mode 100644 index c0c1ed27679f..000000000000 --- a/Documentation/Examples/general_graph__deleteEdgeDefinitionNoDrop.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); -arangosh> var graph = graph_module._create("myGraph", [ed1, ed2]); -arangosh> graph._deleteEdgeDefinition("myEC1"); -arangosh> db._collection("myEC1"); -[ArangoCollection 117595, "myEC1" (type edge, status loaded)] diff --git a/Documentation/Examples/general_graph__deleteEdgeDefinitionWithDrop.generated b/Documentation/Examples/general_graph__deleteEdgeDefinitionWithDrop.generated deleted file mode 100644 index 1ffe5b220eeb..000000000000 --- 
a/Documentation/Examples/general_graph__deleteEdgeDefinitionWithDrop.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); -arangosh> var graph = graph_module._create("myGraph", [ed1, ed2]); -arangosh> graph._deleteEdgeDefinition("myEC1", true); -arangosh> db._collection("myEC1"); -null diff --git a/Documentation/Examples/general_graph__editEdgeDefinition.generated b/Documentation/Examples/general_graph__editEdgeDefinition.generated deleted file mode 100644 index 98826e7a4ca9..000000000000 --- a/Documentation/Examples/general_graph__editEdgeDefinition.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var original = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var modified = graph_module._relation("myEC1", ["myVC2"], ["myVC3"]); -arangosh> var graph = graph_module._create("myGraph", [original]); -arangosh> graph._editEdgeDefinitions(modified); diff --git a/Documentation/Examples/general_graph__extendEdgeDefinitions.generated b/Documentation/Examples/general_graph__extendEdgeDefinitions.generated deleted file mode 100644 index 57643d9368a6..000000000000 --- a/Documentation/Examples/general_graph__extendEdgeDefinitions.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var ed2 = graph_module._relation("myEC2", ["myVC1"], ["myVC3"]); -arangosh> var graph = graph_module._create("myGraph", [ed1]); -arangosh> graph._extendEdgeDefinitions(ed2); diff --git a/Documentation/Examples/general_graph__orphanCollections.generated b/Documentation/Examples/general_graph__orphanCollections.generated deleted file mode 100644 index be470867374f..000000000000 --- a/Documentation/Examples/general_graph__orphanCollections.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var graph = graph_module._create("myGraph", [ed1]); -arangosh> graph._addVertexCollection("myVC3", true); -arangosh> graph._orphanCollections(); -[ - "myVC3" -] diff --git a/Documentation/Examples/general_graph__removeVertexCollections.generated b/Documentation/Examples/general_graph__removeVertexCollections.generated deleted file mode 100644 index f796c839a066..000000000000 --- a/Documentation/Examples/general_graph__removeVertexCollections.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph") -arangosh> var ed1 = graph_module._relation("myEC1", ["myVC1"], ["myVC2"]); -arangosh> var graph = graph_module._create("myGraph", [ed1]); -arangosh> graph._addVertexCollection("myVC3", true); -arangosh> graph._addVertexCollection("myVC4", true); -arangosh> graph._orphanCollections(); -[ - "myVC3", - "myVC4" -] -arangosh> graph._removeVertexCollection("myVC3"); -arangosh> graph._orphanCollections(); -[ - "myVC4" -] diff --git a/Documentation/Examples/general_graph_create_graph_example1.generated b/Documentation/Examples/general_graph_create_graph_example1.generated deleted file mode 100644 index 40848a777ad0..000000000000 --- a/Documentation/Examples/general_graph_create_graph_example1.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> var 
graph_module = require("@arangodb/general-graph"); -arangosh> var edgeDefinitions = graph_module._edgeDefinitions(); -arangosh> graph_module._extendEdgeDefinitions(edgeDefinitions, graph_module._relation("friend_of", "Customer", "Customer")); -arangosh> graph_module._extendEdgeDefinitions( -........> edgeDefinitions, graph_module._relation( -........> "has_bought", ["Customer", "Company"], ["Groceries", "Electronics"])); -arangosh> graph_module._create("myStore", edgeDefinitions); -{[Graph] - "friend_of" : [ArangoCollection 118014, "friend_of" (type edge, status loaded)], - "Customer" : [ArangoCollection 118004, "Customer" (type document, status loaded)], - "has_bought" : [ArangoCollection 118009, "has_bought" (type edge, status loaded)], - "Company" : [ArangoCollection 117999, "Company" (type document, status loaded)], - "Electronics" : [ArangoCollection 117994, "Electronics" (type document, status loaded)], - "Groceries" : [ArangoCollection 117989, "Groceries" (type document, status loaded)] -} diff --git a/Documentation/Examples/general_graph_create_graph_example2.generated b/Documentation/Examples/general_graph_create_graph_example2.generated deleted file mode 100644 index 2eb45eea3f33..000000000000 --- a/Documentation/Examples/general_graph_create_graph_example2.generated +++ /dev/null @@ -1,13 +0,0 @@ -arangosh> var graph_module = require("@arangodb/general-graph"); -arangosh> var edgeDefinitions = graph_module._edgeDefinitions( -........> graph_module._relation("friend_of", ["Customer"], ["Customer"]), graph_module._relation( -........> "has_bought", ["Customer", "Company"], ["Groceries", "Electronics"])); -arangosh> graph_module._create("myStore", edgeDefinitions); -{[Graph] - "friend_of" : [ArangoCollection 118081, "friend_of" (type edge, status loaded)], - "Customer" : [ArangoCollection 118071, "Customer" (type document, status loaded)], - "has_bought" : [ArangoCollection 118076, "has_bought" (type edge, status loaded)], - "Company" : [ArangoCollection 118066, "Company" (type document, status loaded)], - "Electronics" : [ArangoCollection 118061, "Electronics" (type document, status loaded)], - "Groceries" : [ArangoCollection 118056, "Groceries" (type document, status loaded)] -} diff --git a/Documentation/Examples/geoIndexCreateForArrayAttribute.generated b/Documentation/Examples/geoIndexCreateForArrayAttribute.generated deleted file mode 100644 index 2c139fe08e8c..000000000000 --- a/Documentation/Examples/geoIndexCreateForArrayAttribute.generated +++ /dev/null @@ -1,58 +0,0 @@ -arangosh> db.geo.ensureIndex({ type: "geo", fields: [ "loc" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "loc" - ], - "geoJson" : false, - "id" : "geo/118128", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.geo.save({ name : "Name/" + i + "/" + j, loc: [ i, j ] }); -........> } -........> } -arangosh> db.geo.count(); -703 -arangosh> db.geo.near(0, 0).limit(3).toArray(); -[ - { - "_key" : "119185", - "_id" : "geo/119185", - "_rev" : "_YOn1mp6--J", - "name" : "Name/0/0", - "loc" : [ - 0, - 0 - ] - }, - { - "_key" : "119074", - "_id" : "geo/119074", - "_rev" : "_YOn1mpm--B", - "name" : "Name/-10/0", - "loc" : [ - -10, - 0 - ] - }, - { - "_key" : "119182", - "_id" : "geo/119182", - "_rev" : "_YOn1mp6--H", - "name" : "Name/0/-10", - "loc" : [ - 0, - -10 - ] - } -] -arangosh> db.geo.near(0, 
0).count(); -null diff --git a/Documentation/Examples/geoIndexCreateForArrayAttribute1.generated b/Documentation/Examples/geoIndexCreateForArrayAttribute1.generated deleted file mode 100644 index ac10e6f8fef3..000000000000 --- a/Documentation/Examples/geoIndexCreateForArrayAttribute1.generated +++ /dev/null @@ -1,58 +0,0 @@ -arangosh> db.geo.ensureIndex({ type: "geo", fields: [ "loc" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "loc" - ], - "geoJson" : false, - "id" : "geo/120261", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.geo.save({ name : "Name/" + i + "/" + j, loc: [ i, j ] }); -........> } -........> } -arangosh> db.geo.count(); -703 -arangosh> db.geo.near(0, 0).limit(3).toArray(); -[ - { - "_key" : "121318", - "_id" : "geo/121318", - "_rev" : "_YOn1m32--J", - "name" : "Name/0/0", - "loc" : [ - 0, - 0 - ] - }, - { - "_key" : "121207", - "_id" : "geo/121207", - "_rev" : "_YOn1m3i--H", - "name" : "Name/-10/0", - "loc" : [ - -10, - 0 - ] - }, - { - "_key" : "121315", - "_id" : "geo/121315", - "_rev" : "_YOn1m32--H", - "name" : "Name/0/-10", - "loc" : [ - 0, - -10 - ] - } -] -arangosh> db.geo.near(0, 0).count(); -null diff --git a/Documentation/Examples/geoIndexCreateForArrayAttribute2.generated b/Documentation/Examples/geoIndexCreateForArrayAttribute2.generated deleted file mode 100644 index 0fc014878296..000000000000 --- a/Documentation/Examples/geoIndexCreateForArrayAttribute2.generated +++ /dev/null @@ -1,55 +0,0 @@ -arangosh> db.geo2.ensureIndex({ type: "geo", fields: [ "location.latitude", "location.longitude" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "location.latitude", - "location.longitude" - ], - "geoJson" : false, - "id" : "geo2/122394", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.geo2.save({ name : "Name/" + i + "/" + j, location: { latitude : i, longitude : j } }); -........> } -........> } -arangosh> db.geo2.near(0, 0).limit(3).toArray(); -[ - { - "_key" : "123451", - "_id" : "geo2/123451", - "_rev" : "_YOn1nES--F", - "name" : "Name/0/0", - "location" : { - "latitude" : 0, - "longitude" : 0 - } - }, - { - "_key" : "123340", - "_id" : "geo2/123340", - "_rev" : "_YOn1nD2--B", - "name" : "Name/-10/0", - "location" : { - "latitude" : -10, - "longitude" : 0 - } - }, - { - "_key" : "123448", - "_id" : "geo2/123448", - "_rev" : "_YOn1nES--D", - "name" : "Name/0/-10", - "location" : { - "latitude" : 0, - "longitude" : -10 - } - } -] diff --git a/Documentation/Examples/geoIndexFilterOptimization.generated b/Documentation/Examples/geoIndexFilterOptimization.generated deleted file mode 100644 index a372b04e04ba..000000000000 --- a/Documentation/Examples/geoIndexFilterOptimization.generated +++ /dev/null @@ -1,55 +0,0 @@ -arangosh> db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "latitude", - "longitude" - ], - "geoJson" : false, - "id" : "geoFilter/124522", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (i = -90; i <= 90; i += 10) { 
-........> for (j = -180; j <= 180; j += 10) { -........> db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j }); -........> } -........> } -arangosh> var query = "FOR doc in geoFilter FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc" -arangosh> db._explain(query, {}, {colors: false}); -Query String: - FOR doc in geoFilter FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc - -Execution plan: - Id NodeType Est. Comment - 1 SingletonNode 1 * ROOT - 6 IndexNode 703 - FOR doc IN geoFilter /* geo index scan */ - 5 ReturnNode 703 - RETURN doc - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 6 geo geoFilter false true n/a [ `latitude`, `longitude` ] (GEO_DISTANCE([ 0, 0 ], [ doc.`longitude`, doc.`latitude` ]) < 2000) - -Optimization rules applied: - Id RuleName - 1 geo-index-optimizer - 2 remove-unnecessary-calculations-2 - - -arangosh> db._query(query); -[ - { - "_key" : "125579", - "_id" : "geoFilter/125579", - "_rev" : "_YOn1nQW--B", - "name" : "Name/0/0", - "latitude" : 0, - "longitude" : 0 - } -] -[object ArangoQueryCursor, count: 1, cached: false, hasMore: false] diff --git a/Documentation/Examples/geoIndexSimpleQuery.generated b/Documentation/Examples/geoIndexSimpleQuery.generated deleted file mode 100644 index e7a504ea7e62..000000000000 --- a/Documentation/Examples/geoIndexSimpleQuery.generated +++ /dev/null @@ -1,190 +0,0 @@ -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.complex.save({ name : "Name/" + i + "/" + j, -........> home : [ i, j ], -........> work : [ -i, -j ] }); -........> } -........> } -........> -arangosh> db.complex.near(0, 170).limit(5); -[ArangoError 1570: no suitable geo index found for geo restriction on 'complex'] -arangosh> db.complex.ensureIndex({ type: "geo", fields: [ "home" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "home" - ], - "geoJson" : false, - "id" : "complex/128759", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> db.complex.near(0, 170).limit(5).toArray(); -[ - { - "_key" : "127753", - "_id" : "complex/127753", - "_rev" : "_YOn1nhe--D", - "name" : "Name/0/170", - "home" : [ - 0, - 170 - ], - "work" : [ - 0, - -170 - ] - }, - { - "_key" : "127756", - "_id" : "complex/127756", - "_rev" : "_YOn1nhe--F", - "name" : "Name/0/180", - "home" : [ - 0, - 180 - ], - "work" : [ - 0, - -180 - ] - }, - { - "_key" : "127864", - "_id" : "complex/127864", - "_rev" : "_YOn1nhy--H", - "name" : "Name/10/170", - "home" : [ - 10, - 170 - ], - "work" : [ - -10, - -170 - ] - }, - { - "_key" : "127642", - "_id" : "complex/127642", - "_rev" : "_YOn1ndS--D", - "name" : "Name/-10/170", - "home" : [ - -10, - 170 - ], - "work" : [ - 10, - -170 - ] - }, - { - "_key" : "127648", - "_id" : "complex/127648", - "_rev" : "_YOn1ndS--H", - "name" : "Name/0/-180", - "home" : [ - 0, - -180 - ], - "work" : [ - 0, - 180 - ] - } -] -arangosh> db.complex.geo("work").near(0, 170).limit(5); -[ArangoError 1570: no suitable geo index found for geo restriction on 'complex'] -arangosh> db.complex.ensureIndex({ type: "geo", fields: [ "work" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "work" - ], - "geoJson" : false, - "id" : "complex/128767", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> 
db.complex.geo("work").near(0, 170).limit(5).toArray(); -[ - { - "_key" : "127753", - "_id" : "complex/127753", - "_rev" : "_YOn1nhe--D", - "name" : "Name/0/170", - "home" : [ - 0, - 170 - ], - "work" : [ - 0, - -170 - ] - }, - { - "_key" : "127756", - "_id" : "complex/127756", - "_rev" : "_YOn1nhe--F", - "name" : "Name/0/180", - "home" : [ - 0, - 180 - ], - "work" : [ - 0, - -180 - ] - }, - { - "_key" : "127864", - "_id" : "complex/127864", - "_rev" : "_YOn1nhy--H", - "name" : "Name/10/170", - "home" : [ - 10, - 170 - ], - "work" : [ - -10, - -170 - ] - }, - { - "_key" : "127642", - "_id" : "complex/127642", - "_rev" : "_YOn1ndS--D", - "name" : "Name/-10/170", - "home" : [ - -10, - 170 - ], - "work" : [ - 10, - -170 - ] - }, - { - "_key" : "127648", - "_id" : "complex/127648", - "_rev" : "_YOn1ndS--H", - "name" : "Name/0/-180", - "home" : [ - 0, - -180 - ], - "work" : [ - 0, - 180 - ] - } -] diff --git a/Documentation/Examples/geoIndexSortOptimization.generated b/Documentation/Examples/geoIndexSortOptimization.generated deleted file mode 100644 index 4cf5fb403565..000000000000 --- a/Documentation/Examples/geoIndexSortOptimization.generated +++ /dev/null @@ -1,88 +0,0 @@ -arangosh> db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] }); -{ - "bestIndexedLevel" : 17, - "fields" : [ - "latitude", - "longitude" - ], - "geoJson" : false, - "id" : "geoSort/128785", - "isNewlyCreated" : true, - "maxNumCoverCells" : 8, - "sparse" : true, - "type" : "geo", - "unique" : false, - "worstIndexedLevel" : 4, - "code" : 201 -} -arangosh> for (i = -90; i <= 90; i += 10) { -........> for (j = -180; j <= 180; j += 10) { -........> db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j }); -........> } -........> } -arangosh> var query = "FOR doc in geoSort SORT DISTANCE(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc" -arangosh> db._explain(query, {}, {colors: false}); -Query String: - FOR doc in geoSort SORT DISTANCE(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc - -Execution plan: - Id NodeType Est. 
Comment - 1 SingletonNode 1 * ROOT - 7 IndexNode 703 - FOR doc IN geoSort /* geo index scan */ - 5 LimitNode 5 - LIMIT 0, 5 - 6 ReturnNode 5 - RETURN doc - -Indexes used: - By Type Collection Unique Sparse Selectivity Fields Ranges - 7 geo geoSort false true n/a [ `latitude`, `longitude` ] (GEO_DISTANCE([ 0, 0 ], [ doc.`longitude`, doc.`latitude` ]) < "unlimited") - -Optimization rules applied: - Id RuleName - 1 geo-index-optimizer - 2 remove-unnecessary-calculations-2 - - -arangosh> db._query(query); -[ - { - "_key" : "129842", - "_id" : "geoSort/129842", - "_rev" : "_YOn1nwC--D", - "name" : "Name/0/0", - "latitude" : 0, - "longitude" : 0 - }, - { - "_key" : "129731", - "_id" : "geoSort/129731", - "_rev" : "_YOn1nvq--_", - "name" : "Name/-10/0", - "latitude" : -10, - "longitude" : 0 - }, - { - "_key" : "129839", - "_id" : "geoSort/129839", - "_rev" : "_YOn1nwC--B", - "name" : "Name/0/-10", - "latitude" : 0, - "longitude" : -10 - }, - { - "_key" : "129953", - "_id" : "geoSort/129953", - "_rev" : "_YOn1nwW--L", - "name" : "Name/10/0", - "latitude" : 10, - "longitude" : 0 - }, - { - "_key" : "129845", - "_id" : "geoSort/129845", - "_rev" : "_YOn1nwC--F", - "name" : "Name/0/10", - "latitude" : 0, - "longitude" : 10 - } -] -[object ArangoQueryCursor, count: 5, cached: false, hasMore: false] diff --git a/Documentation/Examples/graph_create_cities_sample.generated b/Documentation/Examples/graph_create_cities_sample.generated deleted file mode 100644 index 2b01e1523eeb..000000000000 --- a/Documentation/Examples/graph_create_cities_sample.generated +++ /dev/null @@ -1,168 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("routeplanner"); -arangosh> db.frenchCity.toArray(); -[ - { - "_key" : "Lyon", - "_id" : "frenchCity/Lyon", - "_rev" : "_YOn1n2K--D", - "population" : 80000, - "isCapital" : false, - "geometry" : { - "type" : "Point", - "coordinates" : [ - 4.84, - 45.76 - ] - } - }, - { - "_key" : "Paris", - "_id" : "frenchCity/Paris", - "_rev" : "_YOn1n2K--F", - "population" : 4000000, - "isCapital" : true, - "geometry" : { - "type" : "Point", - "coordinates" : [ - 2.3508, - 48.8567 - ] - } - } -] -arangosh> db.germanCity.toArray(); -[ - { - "_key" : "Cologne", - "_id" : "germanCity/Cologne", - "_rev" : "_YOn1n2K--_", - "population" : 1000000, - "isCapital" : false, - "geometry" : { - "type" : "Point", - "coordinates" : [ - 6.9528, - 50.9364 - ] - } - }, - { - "_key" : "Hamburg", - "_id" : "germanCity/Hamburg", - "_rev" : "_YOn1n2K--B", - "population" : 1000000, - "isCapital" : false, - "geometry" : { - "type" : "Point", - "coordinates" : [ - 10.0014, - 53.5653 - ] - } - }, - { - "_key" : "Berlin", - "_id" : "germanCity/Berlin", - "_rev" : "_YOn1n2G--_", - "population" : 3000000, - "isCapital" : true, - "geometry" : { - "type" : "Point", - "coordinates" : [ - 13.3833, - 52.5167 - ] - } - } -] -arangosh> db.germanHighway.toArray(); -[ - { - "_key" : "130955", - "_id" : "germanHighway/130955", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1n4G--_", - "distance" : 850 - }, - { - "_key" : "130959", - "_id" : "germanHighway/130959", - "_from" : "germanCity/Berlin", - "_to" : "germanCity/Hamburg", - "_rev" : "_YOn1n4G--B", - "distance" : 400 - }, - { - "_key" : "130962", - "_id" : "germanHighway/130962", - "_from" : "germanCity/Hamburg", - "_to" : "germanCity/Cologne", - "_rev" : "_YOn1n4G--D", - "distance" : 500 - } -] -arangosh> db.frenchHighway.toArray(); -[ - { - "_key" : "130965", - "_id" 
: "frenchHighway/130965", - "_from" : "frenchCity/Paris", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1n4K--_", - "distance" : 550 - } -] -arangosh> db.internationalHighway.toArray(); -[ - { - "_key" : "130969", - "_id" : "internationalHighway/130969", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1n4K--B", - "distance" : 1100 - }, - { - "_key" : "130976", - "_id" : "internationalHighway/130976", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1n4K--F", - "distance" : 900 - }, - { - "_key" : "130985", - "_id" : "internationalHighway/130985", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1n4O--D", - "distance" : 550 - }, - { - "_key" : "130973", - "_id" : "internationalHighway/130973", - "_from" : "germanCity/Berlin", - "_to" : "frenchCity/Paris", - "_rev" : "_YOn1n4K--D", - "distance" : 1200 - }, - { - "_key" : "130979", - "_id" : "internationalHighway/130979", - "_from" : "germanCity/Hamburg", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1n4O--_", - "distance" : 1300 - }, - { - "_key" : "130982", - "_id" : "internationalHighway/130982", - "_from" : "germanCity/Cologne", - "_to" : "frenchCity/Lyon", - "_rev" : "_YOn1n4O--B", - "distance" : 700 - } -] -arangosh> examples.dropGraph("routeplanner"); diff --git a/Documentation/Examples/graph_create_knows_sample.generated b/Documentation/Examples/graph_create_knows_sample.generated deleted file mode 100644 index 53942782f46f..000000000000 --- a/Documentation/Examples/graph_create_knows_sample.generated +++ /dev/null @@ -1,79 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("knows_graph"); -arangosh> db.persons.toArray() -[ - { - "_key" : "bob", - "_id" : "persons/bob", - "_rev" : "_YOn1n72--B", - "name" : "Bob" - }, - { - "_key" : "charlie", - "_id" : "persons/charlie", - "_rev" : "_YOn1n72--D", - "name" : "Charlie" - }, - { - "_key" : "dave", - "_id" : "persons/dave", - "_rev" : "_YOn1n72--F", - "name" : "Dave" - }, - { - "_key" : "eve", - "_id" : "persons/eve", - "_rev" : "_YOn1n72--H", - "name" : "Eve" - }, - { - "_key" : "alice", - "_id" : "persons/alice", - "_rev" : "_YOn1n72--_", - "name" : "Alice" - } -] -arangosh> db.knows.toArray(); -[ - { - "_key" : "131059", - "_id" : "knows/131059", - "_from" : "persons/alice", - "_to" : "persons/bob", - "_rev" : "_YOn1n76--_", - "vertex" : "alice" - }, - { - "_key" : "131069", - "_id" : "knows/131069", - "_from" : "persons/eve", - "_to" : "persons/alice", - "_rev" : "_YOn1n76--F", - "vertex" : "eve" - }, - { - "_key" : "131072", - "_id" : "knows/131072", - "_from" : "persons/eve", - "_to" : "persons/bob", - "_rev" : "_YOn1n76--H", - "vertex" : "eve" - }, - { - "_key" : "131063", - "_id" : "knows/131063", - "_from" : "persons/bob", - "_to" : "persons/charlie", - "_rev" : "_YOn1n76--B", - "vertex" : "bob" - }, - { - "_key" : "131066", - "_id" : "knows/131066", - "_from" : "persons/bob", - "_to" : "persons/dave", - "_rev" : "_YOn1n76--D", - "vertex" : "bob" - } -] -arangosh> examples.dropGraph("knows_graph"); diff --git a/Documentation/Examples/graph_create_mps_sample.generated b/Documentation/Examples/graph_create_mps_sample.generated deleted file mode 100644 index 997938c0c275..000000000000 --- a/Documentation/Examples/graph_create_mps_sample.generated +++ /dev/null @@ -1,95 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("mps_graph"); -arangosh> 
db.mps_verts.toArray(); -[ - { - "_key" : "F", - "_id" : "mps_verts/F", - "_rev" : "_YOn1o----D" - }, - { - "_key" : "A", - "_id" : "mps_verts/A", - "_rev" : "_YOn1n96--_" - }, - { - "_key" : "E", - "_id" : "mps_verts/E", - "_rev" : "_YOn1o----B" - }, - { - "_key" : "C", - "_id" : "mps_verts/C", - "_rev" : "_YOn1n96--D" - }, - { - "_key" : "D", - "_id" : "mps_verts/D", - "_rev" : "_YOn1o----_" - }, - { - "_key" : "B", - "_id" : "mps_verts/B", - "_rev" : "_YOn1n96--B" - } -] -arangosh> db.mps_edges.toArray(); -[ - { - "_key" : "131131", - "_id" : "mps_edges/131131", - "_from" : "mps_verts/A", - "_to" : "mps_verts/E", - "_rev" : "_YOn1o----H", - "vertex" : "A" - }, - { - "_key" : "131143", - "_id" : "mps_edges/131143", - "_from" : "mps_verts/E", - "_to" : "mps_verts/F", - "_rev" : "_YOn1o-C--D", - "vertex" : "E" - }, - { - "_key" : "131146", - "_id" : "mps_edges/131146", - "_from" : "mps_verts/F", - "_to" : "mps_verts/C", - "_rev" : "_YOn1o-C--F", - "vertex" : "F" - }, - { - "_key" : "131134", - "_id" : "mps_edges/131134", - "_from" : "mps_verts/A", - "_to" : "mps_verts/D", - "_rev" : "_YOn1o----J", - "vertex" : "A" - }, - { - "_key" : "131137", - "_id" : "mps_edges/131137", - "_from" : "mps_verts/B", - "_to" : "mps_verts/C", - "_rev" : "_YOn1o-C--_", - "vertex" : "B" - }, - { - "_key" : "131127", - "_id" : "mps_edges/131127", - "_from" : "mps_verts/A", - "_to" : "mps_verts/B", - "_rev" : "_YOn1o----F", - "vertex" : "A" - }, - { - "_key" : "131140", - "_id" : "mps_edges/131140", - "_from" : "mps_verts/D", - "_to" : "mps_verts/C", - "_rev" : "_YOn1o-C--B", - "vertex" : "D" - } -] -arangosh> examples.dropGraph("mps_graph"); diff --git a/Documentation/Examples/graph_create_social_sample.generated b/Documentation/Examples/graph_create_social_sample.generated deleted file mode 100644 index 97766221effb..000000000000 --- a/Documentation/Examples/graph_create_social_sample.generated +++ /dev/null @@ -1,72 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var graph = examples.loadGraph("social"); -arangosh> db.female.toArray() -[ - { - "_key" : "diana", - "_id" : "female/diana", - "_rev" : "_YOn1oAi--F", - "name" : "Diana" - }, - { - "_key" : "alice", - "_id" : "female/alice", - "_rev" : "_YOn1oAi--_", - "name" : "Alice" - } -] -arangosh> db.male.toArray() -[ - { - "_key" : "bob", - "_id" : "male/bob", - "_rev" : "_YOn1oAi--B", - "name" : "Bob" - }, - { - "_key" : "charly", - "_id" : "male/charly", - "_rev" : "_YOn1oAi--D", - "name" : "Charly" - } -] -arangosh> db.relation.toArray() -[ - { - "_key" : "131207", - "_id" : "relation/131207", - "_from" : "female/alice", - "_to" : "male/bob", - "_rev" : "_YOn1oAm--_", - "type" : "married", - "vertex" : "alice" - }, - { - "_key" : "131211", - "_id" : "relation/131211", - "_from" : "female/alice", - "_to" : "male/charly", - "_rev" : "_YOn1oAm--B", - "type" : "friend", - "vertex" : "alice" - }, - { - "_key" : "131217", - "_id" : "relation/131217", - "_from" : "male/bob", - "_to" : "female/diana", - "_rev" : "_YOn1oAm--F", - "type" : "friend", - "vertex" : "bob" - }, - { - "_key" : "131214", - "_id" : "relation/131214", - "_from" : "male/charly", - "_to" : "female/diana", - "_rev" : "_YOn1oAm--D", - "type" : "married", - "vertex" : "charly" - } -] -arangosh> examples.dropGraph("social"); diff --git a/Documentation/Examples/graph_create_traversal_sample.generated b/Documentation/Examples/graph_create_traversal_sample.generated deleted file mode 100644 index 23a4ca699774..000000000000 --- 
a/Documentation/Examples/graph_create_traversal_sample.generated +++ /dev/null @@ -1,175 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("traversalGraph"); -arangosh> db.circles.toArray(); -[ - { - "_key" : "I", - "_id" : "circles/I", - "_rev" : "_YOn1oDK--J", - "label" : "9" - }, - { - "_key" : "G", - "_id" : "circles/G", - "_rev" : "_YOn1oDK--F", - "label" : "7" - }, - { - "_key" : "F", - "_id" : "circles/F", - "_rev" : "_YOn1oDK--D", - "label" : "6" - }, - { - "_key" : "A", - "_id" : "circles/A", - "_rev" : "_YOn1oDG--_", - "label" : "1" - }, - { - "_key" : "E", - "_id" : "circles/E", - "_rev" : "_YOn1oDK--B", - "label" : "5" - }, - { - "_key" : "C", - "_id" : "circles/C", - "_rev" : "_YOn1oDG--D", - "label" : "3" - }, - { - "_key" : "D", - "_id" : "circles/D", - "_rev" : "_YOn1oDK--_", - "label" : "4" - }, - { - "_key" : "J", - "_id" : "circles/J", - "_rev" : "_YOn1oEa--_", - "label" : "10" - }, - { - "_key" : "B", - "_id" : "circles/B", - "_rev" : "_YOn1oDG--B", - "label" : "2" - }, - { - "_key" : "H", - "_id" : "circles/H", - "_rev" : "_YOn1oDK--H", - "label" : "8" - }, - { - "_key" : "K", - "_id" : "circles/K", - "_rev" : "_YOn1oEe--_", - "label" : "11" - } -] -arangosh> db.edges.toArray(); -[ - { - "_key" : "131311", - "_id" : "edges/131311", - "_from" : "circles/H", - "_to" : "circles/I", - "_rev" : "_YOn1oEm--B", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blub" - }, - { - "_key" : "131308", - "_id" : "edges/131308", - "_from" : "circles/G", - "_to" : "circles/H", - "_rev" : "_YOn1oEm--_", - "theFalse" : false, - "theTruth" : true, - "label" : "right_blob" - }, - { - "_key" : "131289", - "_id" : "edges/131289", - "_from" : "circles/A", - "_to" : "circles/B", - "_rev" : "_YOn1oEe--B", - "theFalse" : false, - "theTruth" : true, - "label" : "left_bar" - }, - { - "_key" : "131317", - "_id" : "edges/131317", - "_from" : "circles/J", - "_to" : "circles/K", - "_rev" : "_YOn1oEm--F", - "theFalse" : false, - "theTruth" : true, - "label" : "right_zup" - }, - { - "_key" : "131302", - "_id" : "edges/131302", - "_from" : "circles/E", - "_to" : "circles/F", - "_rev" : "_YOn1oEi--D", - "theFalse" : false, - "theTruth" : true, - "label" : "left_schubi" - }, - { - "_key" : "131296", - "_id" : "edges/131296", - "_from" : "circles/C", - "_to" : "circles/D", - "_rev" : "_YOn1oEi--_", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blorg" - }, - { - "_key" : "131314", - "_id" : "edges/131314", - "_from" : "circles/G", - "_to" : "circles/J", - "_rev" : "_YOn1oEm--D", - "theFalse" : false, - "theTruth" : true, - "label" : "right_zip" - }, - { - "_key" : "131305", - "_id" : "edges/131305", - "_from" : "circles/A", - "_to" : "circles/G", - "_rev" : "_YOn1oEi--F", - "theFalse" : false, - "theTruth" : true, - "label" : "right_foo" - }, - { - "_key" : "131293", - "_id" : "edges/131293", - "_from" : "circles/B", - "_to" : "circles/C", - "_rev" : "_YOn1oEe--D", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blarg" - }, - { - "_key" : "131299", - "_id" : "edges/131299", - "_from" : "circles/B", - "_to" : "circles/E", - "_rev" : "_YOn1oEi--B", - "theFalse" : false, - "theTruth" : true, - "label" : "left_blub" - } -] -arangosh> examples.dropGraph("traversalGraph"); diff --git a/Documentation/Examples/graph_create_world_sample.generated b/Documentation/Examples/graph_create_world_sample.generated deleted file mode 100644 index 2543d115d68d..000000000000 --- 
a/Documentation/Examples/graph_create_world_sample.generated +++ /dev/null @@ -1,1348 +0,0 @@ -arangosh> var examples = require("@arangodb/graph-examples/example-graph.js"); -arangosh> var g = examples.loadGraph("worldCountry"); -arangosh> db.worldVertices.toArray(); -[ - { - "_key" : "capital-ottawa", - "_id" : "worldVertices/capital-ottawa", - "_rev" : "_YOn1oMO--L", - "name" : "Ottawa", - "type" : "capital" - }, - { - "_key" : "capital-yaounde", - "_id" : "worldVertices/capital-yaounde", - "_rev" : "_YOn1oMW--H", - "name" : "Yaounde", - "type" : "capital" - }, - { - "_key" : "capital-algiers", - "_id" : "worldVertices/capital-algiers", - "_rev" : "_YOn1oMC--N", - "name" : "Algiers", - "type" : "capital" - }, - { - "_key" : "continent-south-america", - "_id" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oGm--J", - "name" : "South America", - "type" : "continent" - }, - { - "_key" : "capital-andorra-la-vella", - "_id" : "worldVertices/capital-andorra-la-vella", - "_rev" : "_YOn1oMG--_", - "name" : "Andorra la Vella", - "type" : "capital" - }, - { - "_key" : "country-people-s-republic-of-china", - "_id" : "worldVertices/country-people-s-republic-of-china", - "_rev" : "_YOn1oMC--L", - "name" : "People's Republic of China", - "type" : "country", - "code" : "CHN" - }, - { - "_key" : "capital-tirana", - "_id" : "worldVertices/capital-tirana", - "_rev" : "_YOn1oMW--B", - "name" : "Tirana", - "type" : "capital" - }, - { - "_key" : "country-cote-d-ivoire", - "_id" : "worldVertices/country-cote-d-ivoire", - "_rev" : "_YOn1oL6--B", - "name" : "Cote d'Ivoire", - "type" : "country", - "code" : "CIV" - }, - { - "_key" : "capital-sofia", - "_id" : "worldVertices/capital-sofia", - "_rev" : "_YOn1oMS--L", - "name" : "Sofia", - "type" : "capital" - }, - { - "_key" : "capital-bridgetown", - "_id" : "worldVertices/capital-bridgetown", - "_rev" : "_YOn1oMG--N", - "name" : "Bridgetown", - "type" : "capital" - }, - { - "_key" : "country-chad", - "_id" : "worldVertices/country-chad", - "_rev" : "_YOn1oKq--B", - "name" : "Chad", - "type" : "country", - "code" : "TCD" - }, - { - "_key" : "capital-thimphu", - "_id" : "worldVertices/capital-thimphu", - "_rev" : "_YOn1oMW--_", - "name" : "Thimphu", - "type" : "capital" - }, - { - "_key" : "capital-santiago", - "_id" : "worldVertices/capital-santiago", - "_rev" : "_YOn1oMS--H", - "name" : "Santiago", - "type" : "capital" - }, - { - "_key" : "capital-manama", - "_id" : "worldVertices/capital-manama", - "_rev" : "_YOn1oMO--F", - "name" : "Manama", - "type" : "capital" - }, - { - "_key" : "capital-zagreb", - "_id" : "worldVertices/capital-zagreb", - "_rev" : "_YOn1oMa--_", - "name" : "Zagreb", - "type" : "capital" - }, - { - "_key" : "country-brazil", - "_id" : "worldVertices/country-brazil", - "_rev" : "_YOn1oJS--H", - "name" : "Brazil", - "type" : "country", - "code" : "BRA" - }, - { - "_key" : "country-burundi", - "_id" : "worldVertices/country-burundi", - "_rev" : "_YOn1oKm--B", - "name" : "Burundi", - "type" : "country", - "code" : "BDI" - }, - { - "_key" : "capital-la-paz", - "_id" : "worldVertices/capital-la-paz", - "_rev" : "_YOn1oMO--B", - "name" : "La Paz", - "type" : "capital" - }, - { - "_key" : "country-germany", - "_id" : "worldVertices/country-germany", - "_rev" : "_YOn1oMC--J", - "name" : "Germany", - "type" : "country", - "code" : "DEU" - }, - { - "_key" : "country-botswana", - "_id" : "worldVertices/country-botswana", - "_rev" : "_YOn1oJS--F", - "name" : "Botswana", - "type" : "country", - "code" : "BWA" - }, - { - "_key" : 
"capital-phnom-penh", - "_id" : "worldVertices/capital-phnom-penh", - "_rev" : "_YOn1oMS--_", - "name" : "Phnom Penh", - "type" : "capital" - }, - { - "_key" : "country-croatia", - "_id" : "worldVertices/country-croatia", - "_rev" : "_YOn1oM---_", - "name" : "Croatia", - "type" : "country", - "code" : "HRV" - }, - { - "_key" : "country-eritrea", - "_id" : "worldVertices/country-eritrea", - "_rev" : "_YOn1oMC--D", - "name" : "Eritrea", - "type" : "country", - "code" : "ERI" - }, - { - "_key" : "country-angola", - "_id" : "worldVertices/country-angola", - "_rev" : "_YOn1oGq--D", - "name" : "Angola", - "type" : "country", - "code" : "AGO" - }, - { - "_key" : "country-bahrain", - "_id" : "worldVertices/country-bahrain", - "_rev" : "_YOn1oI---B", - "name" : "Bahrain", - "type" : "country", - "code" : "BHR" - }, - { - "_key" : "country-argentina", - "_id" : "worldVertices/country-argentina", - "_rev" : "_YOn1oH6--_", - "name" : "Argentina", - "type" : "country", - "code" : "ARG" - }, - { - "_key" : "capital-canberra", - "_id" : "worldVertices/capital-canberra", - "_rev" : "_YOn1oMK--F", - "name" : "Canberra", - "type" : "capital" - }, - { - "_key" : "capital-bujumbura", - "_id" : "worldVertices/capital-bujumbura", - "_rev" : "_YOn1oMK--B", - "name" : "Bujumbura", - "type" : "capital" - }, - { - "_key" : "country-bangladesh", - "_id" : "worldVertices/country-bangladesh", - "_rev" : "_YOn1oI---D", - "name" : "Bangladesh", - "type" : "country", - "code" : "BGD" - }, - { - "_key" : "country-ecuador", - "_id" : "worldVertices/country-ecuador", - "_rev" : "_YOn1oMC--_", - "name" : "Ecuador", - "type" : "country", - "code" : "ECU" - }, - { - "_key" : "continent-africa", - "_id" : "worldVertices/continent-africa", - "_rev" : "_YOn1oGm--_", - "name" : "Africa", - "type" : "continent" - }, - { - "_key" : "country-cambodia", - "_id" : "worldVertices/country-cambodia", - "_rev" : "_YOn1oKm--D", - "name" : "Cambodia", - "type" : "country", - "code" : "KHM" - }, - { - "_key" : "country-chile", - "_id" : "worldVertices/country-chile", - "_rev" : "_YOn1oKq--D", - "name" : "Chile", - "type" : "country", - "code" : "CHL" - }, - { - "_key" : "country-bolivia", - "_id" : "worldVertices/country-bolivia", - "_rev" : "_YOn1oJS--B", - "name" : "Bolivia", - "type" : "country", - "code" : "BOL" - }, - { - "_key" : "country-belgium", - "_id" : "worldVertices/country-belgium", - "_rev" : "_YOn1oJO--_", - "name" : "Belgium", - "type" : "country", - "code" : "BEL" - }, - { - "_key" : "capital-copenhagen", - "_id" : "worldVertices/capital-copenhagen", - "_rev" : "_YOn1oMK--H", - "name" : "Copenhagen", - "type" : "capital" - }, - { - "_key" : "country-cameroon", - "_id" : "worldVertices/country-cameroon", - "_rev" : "_YOn1oKm--F", - "name" : "Cameroon", - "type" : "country", - "code" : "CMR" - }, - { - "_key" : "capital-gaborone", - "_id" : "worldVertices/capital-gaborone", - "_rev" : "_YOn1oMK--L", - "name" : "Gaborone", - "type" : "capital" - }, - { - "_key" : "continent-australia", - "_id" : "worldVertices/continent-australia", - "_rev" : "_YOn1oGm--D", - "name" : "Australia", - "type" : "continent" - }, - { - "_key" : "world", - "_id" : "worldVertices/world", - "_rev" : "_YOn1oGi--_", - "name" : "World", - "type" : "root" - }, - { - "_key" : "capital-yamoussoukro", - "_id" : "worldVertices/capital-yamoussoukro", - "_rev" : "_YOn1oMW--F", - "name" : "Yamoussoukro", - "type" : "capital" - }, - { - "_key" : "capital-brasilia", - "_id" : "worldVertices/capital-brasilia", - "_rev" : "_YOn1oMG--L", - "name" : "Brasilia", - 
"type" : "capital" - }, - { - "_key" : "country-antigua-and-barbuda", - "_id" : "worldVertices/country-antigua-and-barbuda", - "_rev" : "_YOn1oGq--F", - "name" : "Antigua and Barbuda", - "type" : "country", - "code" : "ATG" - }, - { - "_key" : "capital-bandar-seri-begawan", - "_id" : "worldVertices/capital-bandar-seri-begawan", - "_rev" : "_YOn1oMG--D", - "name" : "Bandar Seri Begawan", - "type" : "capital" - }, - { - "_key" : "capital-dhaka", - "_id" : "worldVertices/capital-dhaka", - "_rev" : "_YOn1oMK--J", - "name" : "Dhaka", - "type" : "capital" - }, - { - "_key" : "capital-saint-john-s", - "_id" : "worldVertices/capital-saint-john-s", - "_rev" : "_YOn1oMS--F", - "name" : "Saint John's", - "type" : "capital" - }, - { - "_key" : "country-burkina-faso", - "_id" : "worldVertices/country-burkina-faso", - "_rev" : "_YOn1oKm--_", - "name" : "Burkina Faso", - "type" : "country", - "code" : "BFA" - }, - { - "_key" : "capital-prague", - "_id" : "worldVertices/capital-prague", - "_rev" : "_YOn1oMS--B", - "name" : "Prague", - "type" : "capital" - }, - { - "_key" : "country-czech-republic", - "_id" : "worldVertices/country-czech-republic", - "_rev" : "_YOn1oM---B", - "name" : "Czech Republic", - "type" : "country", - "code" : "CZE" - }, - { - "_key" : "country-egypt", - "_id" : "worldVertices/country-egypt", - "_rev" : "_YOn1oMC--B", - "name" : "Egypt", - "type" : "country", - "code" : "EGY" - }, - { - "_key" : "capital-helsinki", - "_id" : "worldVertices/capital-helsinki", - "_rev" : "_YOn1oMK--N", - "name" : "Helsinki", - "type" : "capital" - }, - { - "_key" : "country-bhutan", - "_id" : "worldVertices/country-bhutan", - "_rev" : "_YOn1oJS--_", - "name" : "Bhutan", - "type" : "country", - "code" : "BTN" - }, - { - "_key" : "country-algeria", - "_id" : "worldVertices/country-algeria", - "_rev" : "_YOn1oGq--_", - "name" : "Algeria", - "type" : "country", - "code" : "DZA" - }, - { - "_key" : "country-afghanistan", - "_id" : "worldVertices/country-afghanistan", - "_rev" : "_YOn1oGm--L", - "name" : "Afghanistan", - "type" : "country", - "code" : "AFG" - }, - { - "_key" : "capital-paris", - "_id" : "worldVertices/capital-paris", - "_rev" : "_YOn1oMO--P", - "name" : "Paris", - "type" : "capital" - }, - { - "_key" : "country-finland", - "_id" : "worldVertices/country-finland", - "_rev" : "_YOn1oMC--F", - "name" : "Finland", - "type" : "country", - "code" : "FIN" - }, - { - "_key" : "country-austria", - "_id" : "worldVertices/country-austria", - "_rev" : "_YOn1oH6--D", - "name" : "Austria", - "type" : "country", - "code" : "AUT" - }, - { - "_key" : "capital-brussels", - "_id" : "worldVertices/capital-brussels", - "_rev" : "_YOn1oMG--P", - "name" : "Brussels", - "type" : "capital" - }, - { - "_key" : "country-denmark", - "_id" : "worldVertices/country-denmark", - "_rev" : "_YOn1oM---D", - "name" : "Denmark", - "type" : "country", - "code" : "DNK" - }, - { - "_key" : "country-albania", - "_id" : "worldVertices/country-albania", - "_rev" : "_YOn1oGm--N", - "name" : "Albania", - "type" : "country", - "code" : "ALB" - }, - { - "_key" : "capital-berlin", - "_id" : "worldVertices/capital-berlin", - "_rev" : "_YOn1oMG--H", - "name" : "Berlin", - "type" : "capital" - }, - { - "_key" : "capital-buenos-aires", - "_id" : "worldVertices/capital-buenos-aires", - "_rev" : "_YOn1oMK--_", - "name" : "Buenos Aires", - "type" : "capital" - }, - { - "_key" : "capital-quito", - "_id" : "worldVertices/capital-quito", - "_rev" : "_YOn1oMS--D", - "name" : "Quito", - "type" : "capital" - }, - { - "_key" : "country-france", - 
"_id" : "worldVertices/country-france", - "_rev" : "_YOn1oMC--H", - "name" : "France", - "type" : "country", - "code" : "FRA" - }, - { - "_key" : "country-colombia", - "_id" : "worldVertices/country-colombia", - "_rev" : "_YOn1oL6--_", - "name" : "Colombia", - "type" : "country", - "code" : "COL" - }, - { - "_key" : "country-bulgaria", - "_id" : "worldVertices/country-bulgaria", - "_rev" : "_YOn1oJW--_", - "name" : "Bulgaria", - "type" : "country", - "code" : "BGR" - }, - { - "_key" : "continent-north-america", - "_id" : "worldVertices/continent-north-america", - "_rev" : "_YOn1oGm--H", - "name" : "North America", - "type" : "continent" - }, - { - "_key" : "capital-vienna", - "_id" : "worldVertices/capital-vienna", - "_rev" : "_YOn1oMW--D", - "name" : "Vienna", - "type" : "capital" - }, - { - "_key" : "country-bahamas", - "_id" : "worldVertices/country-bahamas", - "_rev" : "_YOn1oI---_", - "name" : "Bahamas", - "type" : "country", - "code" : "BHS" - }, - { - "_key" : "continent-asia", - "_id" : "worldVertices/continent-asia", - "_rev" : "_YOn1oGm--B", - "name" : "Asia", - "type" : "continent" - }, - { - "_key" : "country-barbados", - "_id" : "worldVertices/country-barbados", - "_rev" : "_YOn1oI---F", - "name" : "Barbados", - "type" : "country", - "code" : "BRB" - }, - { - "_key" : "capital-n-djamena", - "_id" : "worldVertices/capital-n-djamena", - "_rev" : "_YOn1oMO--J", - "name" : "N'Djamena", - "type" : "capital" - }, - { - "_key" : "capital-ouagadougou", - "_id" : "worldVertices/capital-ouagadougou", - "_rev" : "_YOn1oMO--N", - "name" : "Ouagadougou", - "type" : "capital" - }, - { - "_key" : "capital-bogota", - "_id" : "worldVertices/capital-bogota", - "_rev" : "_YOn1oMG--J", - "name" : "Bogota", - "type" : "capital" - }, - { - "_key" : "country-brunei", - "_id" : "worldVertices/country-brunei", - "_rev" : "_YOn1oJS--J", - "name" : "Brunei", - "type" : "country", - "code" : "BRN" - }, - { - "_key" : "capital-asmara", - "_id" : "worldVertices/capital-asmara", - "_rev" : "_YOn1oMG--B", - "name" : "Asmara", - "type" : "capital" - }, - { - "_key" : "capital-cairo", - "_id" : "worldVertices/capital-cairo", - "_rev" : "_YOn1oMK--D", - "name" : "Cairo", - "type" : "capital" - }, - { - "_key" : "capital-kabul", - "_id" : "worldVertices/capital-kabul", - "_rev" : "_YOn1oMO--_", - "name" : "Kabul", - "type" : "capital" - }, - { - "_key" : "capital-nassau", - "_id" : "worldVertices/capital-nassau", - "_rev" : "_YOn1oMO--H", - "name" : "Nassau", - "type" : "capital" - }, - { - "_key" : "capital-beijing", - "_id" : "worldVertices/capital-beijing", - "_rev" : "_YOn1oMG--F", - "name" : "Beijing", - "type" : "capital" - }, - { - "_key" : "country-canada", - "_id" : "worldVertices/country-canada", - "_rev" : "_YOn1oKq--_", - "name" : "Canada", - "type" : "country", - "code" : "CAN" - }, - { - "_key" : "continent-europe", - "_id" : "worldVertices/continent-europe", - "_rev" : "_YOn1oGm--F", - "name" : "Europe", - "type" : "continent" - }, - { - "_key" : "capital-luanda", - "_id" : "worldVertices/capital-luanda", - "_rev" : "_YOn1oMO--D", - "name" : "Luanda", - "type" : "capital" - }, - { - "_key" : "country-australia", - "_id" : "worldVertices/country-australia", - "_rev" : "_YOn1oH6--B", - "name" : "Australia", - "type" : "country", - "code" : "AUS" - }, - { - "_key" : "capital-sarajevo", - "_id" : "worldVertices/capital-sarajevo", - "_rev" : "_YOn1oMS--J", - "name" : "Sarajevo", - "type" : "capital" - }, - { - "_key" : "country-andorra", - "_id" : "worldVertices/country-andorra", - "_rev" : 
"_YOn1oGq--B", - "name" : "Andorra", - "type" : "country", - "code" : "AND" - }, - { - "_key" : "country-bosnia-and-herzegovina", - "_id" : "worldVertices/country-bosnia-and-herzegovina", - "_rev" : "_YOn1oJS--D", - "name" : "Bosnia and Herzegovina", - "type" : "country", - "code" : "BIH" - } -] -arangosh> db.worldEdges.toArray(); -[ - { - "_key" : "131691", - "_id" : "worldEdges/131691", - "_from" : "worldVertices/capital-bogota", - "_to" : "worldVertices/country-colombia", - "_rev" : "_YOn1oNS--_", - "type" : "is-in" - }, - { - "_key" : "131601", - "_id" : "worldEdges/131601", - "_from" : "worldVertices/country-bosnia-and-herzegovina", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oM6--H", - "type" : "is-in" - }, - { - "_key" : "131589", - "_id" : "worldEdges/131589", - "_from" : "worldVertices/country-barbados", - "_to" : "worldVertices/continent-north-america", - "_rev" : "_YOn1oM6--_", - "type" : "is-in" - }, - { - "_key" : "131733", - "_id" : "worldEdges/131733", - "_from" : "worldVertices/capital-luanda", - "_to" : "worldVertices/country-angola", - "_rev" : "_YOn1oNe--H", - "type" : "is-in" - }, - { - "_key" : "131559", - "_id" : "worldEdges/131559", - "_from" : "worldVertices/country-algeria", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oMy--B", - "type" : "is-in" - }, - { - "_key" : "131760", - "_id" : "worldEdges/131760", - "_from" : "worldVertices/capital-quito", - "_to" : "worldVertices/country-ecuador", - "_rev" : "_YOn1oNm--B", - "type" : "is-in" - }, - { - "_key" : "131544", - "_id" : "worldEdges/131544", - "_from" : "worldVertices/continent-europe", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMa--H", - "type" : "is-in" - }, - { - "_key" : "131712", - "_id" : "worldEdges/131712", - "_from" : "worldVertices/capital-canberra", - "_to" : "worldVertices/country-australia", - "_rev" : "_YOn1oNa--D", - "type" : "is-in" - }, - { - "_key" : "131565", - "_id" : "worldEdges/131565", - "_from" : "worldVertices/country-angola", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oMy--F", - "type" : "is-in" - }, - { - "_key" : "131670", - "_id" : "worldEdges/131670", - "_from" : "worldVertices/country-people-s-republic-of-china", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oNK--J", - "type" : "is-in" - }, - { - "_key" : "131534", - "_id" : "worldEdges/131534", - "_from" : "worldVertices/continent-africa", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMa--B", - "type" : "is-in" - }, - { - "_key" : "131790", - "_id" : "worldEdges/131790", - "_from" : "worldVertices/capital-zagreb", - "_to" : "worldVertices/country-croatia", - "_rev" : "_YOn1oNq--J", - "type" : "is-in" - }, - { - "_key" : "131688", - "_id" : "worldEdges/131688", - "_from" : "worldVertices/capital-berlin", - "_to" : "worldVertices/country-germany", - "_rev" : "_YOn1oNO--J", - "type" : "is-in" - }, - { - "_key" : "131739", - "_id" : "worldEdges/131739", - "_from" : "worldVertices/capital-nassau", - "_to" : "worldVertices/country-bahamas", - "_rev" : "_YOn1oNi--_", - "type" : "is-in" - }, - { - "_key" : "131694", - "_id" : "worldEdges/131694", - "_from" : "worldVertices/capital-brasilia", - "_to" : "worldVertices/country-brazil", - "_rev" : "_YOn1oNS--B", - "type" : "is-in" - }, - { - "_key" : "131775", - "_id" : "worldEdges/131775", - "_from" : "worldVertices/capital-thimphu", - "_to" : "worldVertices/country-bhutan", - "_rev" : "_YOn1oNq--_", - "type" : "is-in" - }, - { - "_key" : "131763", - "_id" : "worldEdges/131763", - "_from" : 
"worldVertices/capital-saint-john-s", - "_to" : "worldVertices/country-antigua-and-barbuda", - "_rev" : "_YOn1oNm--D", - "type" : "is-in" - }, - { - "_key" : "131682", - "_id" : "worldEdges/131682", - "_from" : "worldVertices/capital-bandar-seri-begawan", - "_to" : "worldVertices/country-brunei", - "_rev" : "_YOn1oNO--F", - "type" : "is-in" - }, - { - "_key" : "131700", - "_id" : "worldEdges/131700", - "_from" : "worldVertices/capital-brussels", - "_to" : "worldVertices/country-belgium", - "_rev" : "_YOn1oNS--F", - "type" : "is-in" - }, - { - "_key" : "131619", - "_id" : "worldEdges/131619", - "_from" : "worldVertices/country-burundi", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oN---J", - "type" : "is-in" - }, - { - "_key" : "131769", - "_id" : "worldEdges/131769", - "_from" : "worldVertices/capital-sarajevo", - "_to" : "worldVertices/country-bosnia-and-herzegovina", - "_rev" : "_YOn1oNm--H", - "type" : "is-in" - }, - { - "_key" : "131646", - "_id" : "worldEdges/131646", - "_from" : "worldVertices/country-czech-republic", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNG--D", - "type" : "is-in" - }, - { - "_key" : "131649", - "_id" : "worldEdges/131649", - "_from" : "worldVertices/country-denmark", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNG--F", - "type" : "is-in" - }, - { - "_key" : "131766", - "_id" : "worldEdges/131766", - "_from" : "worldVertices/capital-santiago", - "_to" : "worldVertices/country-chile", - "_rev" : "_YOn1oNm--F", - "type" : "is-in" - }, - { - "_key" : "131655", - "_id" : "worldEdges/131655", - "_from" : "worldVertices/country-egypt", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oNK--_", - "type" : "is-in" - }, - { - "_key" : "131538", - "_id" : "worldEdges/131538", - "_from" : "worldVertices/continent-asia", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMa--D", - "type" : "is-in" - }, - { - "_key" : "131562", - "_id" : "worldEdges/131562", - "_from" : "worldVertices/country-andorra", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oMy--D", - "type" : "is-in" - }, - { - "_key" : "131742", - "_id" : "worldEdges/131742", - "_from" : "worldVertices/capital-n-djamena", - "_to" : "worldVertices/country-chad", - "_rev" : "_YOn1oNi--B", - "type" : "is-in" - }, - { - "_key" : "131547", - "_id" : "worldEdges/131547", - "_from" : "worldVertices/continent-north-america", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMu--_", - "type" : "is-in" - }, - { - "_key" : "131631", - "_id" : "worldEdges/131631", - "_from" : "worldVertices/country-chad", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oNC--F", - "type" : "is-in" - }, - { - "_key" : "131571", - "_id" : "worldEdges/131571", - "_from" : "worldVertices/country-argentina", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oM2--_", - "type" : "is-in" - }, - { - "_key" : "131748", - "_id" : "worldEdges/131748", - "_from" : "worldVertices/capital-ouagadougou", - "_to" : "worldVertices/country-burkina-faso", - "_rev" : "_YOn1oNi--F", - "type" : "is-in" - }, - { - "_key" : "131667", - "_id" : "worldEdges/131667", - "_from" : "worldVertices/country-germany", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNK--H", - "type" : "is-in" - }, - { - "_key" : "131721", - "_id" : "worldEdges/131721", - "_from" : "worldVertices/capital-gaborone", - "_to" : "worldVertices/country-botswana", - "_rev" : "_YOn1oNe--_", - "type" : "is-in" - }, - { - "_key" : "131787", - "_id" : "worldEdges/131787", - "_from" : 
"worldVertices/capital-yaounde", - "_to" : "worldVertices/country-cameroon", - "_rev" : "_YOn1oNq--H", - "type" : "is-in" - }, - { - "_key" : "131598", - "_id" : "worldEdges/131598", - "_from" : "worldVertices/country-bolivia", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oM6--F", - "type" : "is-in" - }, - { - "_key" : "131715", - "_id" : "worldEdges/131715", - "_from" : "worldVertices/capital-copenhagen", - "_to" : "worldVertices/country-denmark", - "_rev" : "_YOn1oNa--F", - "type" : "is-in" - }, - { - "_key" : "131745", - "_id" : "worldEdges/131745", - "_from" : "worldVertices/capital-ottawa", - "_to" : "worldVertices/country-canada", - "_rev" : "_YOn1oNi--D", - "type" : "is-in" - }, - { - "_key" : "131622", - "_id" : "worldEdges/131622", - "_from" : "worldVertices/country-cambodia", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oNC--_", - "type" : "is-in" - }, - { - "_key" : "131592", - "_id" : "worldEdges/131592", - "_from" : "worldVertices/country-belgium", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oM6--B", - "type" : "is-in" - }, - { - "_key" : "131580", - "_id" : "worldEdges/131580", - "_from" : "worldVertices/country-bahamas", - "_to" : "worldVertices/continent-north-america", - "_rev" : "_YOn1oM2--F", - "type" : "is-in" - }, - { - "_key" : "131586", - "_id" : "worldEdges/131586", - "_from" : "worldVertices/country-bangladesh", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oM2--J", - "type" : "is-in" - }, - { - "_key" : "131718", - "_id" : "worldEdges/131718", - "_from" : "worldVertices/capital-dhaka", - "_to" : "worldVertices/country-bangladesh", - "_rev" : "_YOn1oNa--H", - "type" : "is-in" - }, - { - "_key" : "131679", - "_id" : "worldEdges/131679", - "_from" : "worldVertices/capital-asmara", - "_to" : "worldVertices/country-eritrea", - "_rev" : "_YOn1oNO--D", - "type" : "is-in" - }, - { - "_key" : "131574", - "_id" : "worldEdges/131574", - "_from" : "worldVertices/country-australia", - "_to" : "worldVertices/continent-australia", - "_rev" : "_YOn1oM2--B", - "type" : "is-in" - }, - { - "_key" : "131541", - "_id" : "worldEdges/131541", - "_from" : "worldVertices/continent-australia", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMa--F", - "type" : "is-in" - }, - { - "_key" : "131553", - "_id" : "worldEdges/131553", - "_from" : "worldVertices/country-afghanistan", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oMu--D", - "type" : "is-in" - }, - { - "_key" : "131643", - "_id" : "worldEdges/131643", - "_from" : "worldVertices/country-croatia", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNG--B", - "type" : "is-in" - }, - { - "_key" : "131664", - "_id" : "worldEdges/131664", - "_from" : "worldVertices/country-france", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNK--F", - "type" : "is-in" - }, - { - "_key" : "131685", - "_id" : "worldEdges/131685", - "_from" : "worldVertices/capital-beijing", - "_to" : "worldVertices/country-people-s-republic-of-china", - "_rev" : "_YOn1oNO--H", - "type" : "is-in" - }, - { - "_key" : "131604", - "_id" : "worldEdges/131604", - "_from" : "worldVertices/country-botswana", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oN---_", - "type" : "is-in" - }, - { - "_key" : "131778", - "_id" : "worldEdges/131778", - "_from" : "worldVertices/capital-tirana", - "_to" : "worldVertices/country-albania", - "_rev" : "_YOn1oNq--B", - "type" : "is-in" - }, - { - "_key" : "131583", - "_id" : "worldEdges/131583", - "_from" : 
"worldVertices/country-bahrain", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oM2--H", - "type" : "is-in" - }, - { - "_key" : "131568", - "_id" : "worldEdges/131568", - "_from" : "worldVertices/country-antigua-and-barbuda", - "_to" : "worldVertices/continent-north-america", - "_rev" : "_YOn1oMy--H", - "type" : "is-in" - }, - { - "_key" : "131676", - "_id" : "worldEdges/131676", - "_from" : "worldVertices/capital-andorra-la-vella", - "_to" : "worldVertices/country-andorra", - "_rev" : "_YOn1oNO--B", - "type" : "is-in" - }, - { - "_key" : "131652", - "_id" : "worldEdges/131652", - "_from" : "worldVertices/country-ecuador", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oNG--H", - "type" : "is-in" - }, - { - "_key" : "131658", - "_id" : "worldEdges/131658", - "_from" : "worldVertices/country-eritrea", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oNK--B", - "type" : "is-in" - }, - { - "_key" : "131736", - "_id" : "worldEdges/131736", - "_from" : "worldVertices/capital-manama", - "_to" : "worldVertices/country-bahrain", - "_rev" : "_YOn1oNe--J", - "type" : "is-in" - }, - { - "_key" : "131661", - "_id" : "worldEdges/131661", - "_from" : "worldVertices/country-finland", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oNK--D", - "type" : "is-in" - }, - { - "_key" : "131754", - "_id" : "worldEdges/131754", - "_from" : "worldVertices/capital-phnom-penh", - "_to" : "worldVertices/country-cambodia", - "_rev" : "_YOn1oNi--J", - "type" : "is-in" - }, - { - "_key" : "131706", - "_id" : "worldEdges/131706", - "_from" : "worldVertices/capital-bujumbura", - "_to" : "worldVertices/country-burundi", - "_rev" : "_YOn1oNa--_", - "type" : "is-in" - }, - { - "_key" : "131697", - "_id" : "worldEdges/131697", - "_from" : "worldVertices/capital-bridgetown", - "_to" : "worldVertices/country-barbados", - "_rev" : "_YOn1oNS--D", - "type" : "is-in" - }, - { - "_key" : "131703", - "_id" : "worldEdges/131703", - "_from" : "worldVertices/capital-buenos-aires", - "_to" : "worldVertices/country-argentina", - "_rev" : "_YOn1oNS--H", - "type" : "is-in" - }, - { - "_key" : "131577", - "_id" : "worldEdges/131577", - "_from" : "worldVertices/country-austria", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oM2--D", - "type" : "is-in" - }, - { - "_key" : "131751", - "_id" : "worldEdges/131751", - "_from" : "worldVertices/capital-paris", - "_to" : "worldVertices/country-france", - "_rev" : "_YOn1oNi--H", - "type" : "is-in" - }, - { - "_key" : "131550", - "_id" : "worldEdges/131550", - "_from" : "worldVertices/continent-south-america", - "_to" : "worldVertices/world", - "_rev" : "_YOn1oMu--B", - "type" : "is-in" - }, - { - "_key" : "131628", - "_id" : "worldEdges/131628", - "_from" : "worldVertices/country-canada", - "_to" : "worldVertices/continent-north-america", - "_rev" : "_YOn1oNC--D", - "type" : "is-in" - }, - { - "_key" : "131709", - "_id" : "worldEdges/131709", - "_from" : "worldVertices/capital-cairo", - "_to" : "worldVertices/country-egypt", - "_rev" : "_YOn1oNa--B", - "type" : "is-in" - }, - { - "_key" : "131607", - "_id" : "worldEdges/131607", - "_from" : "worldVertices/country-brazil", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oN---B", - "type" : "is-in" - }, - { - "_key" : "131673", - "_id" : "worldEdges/131673", - "_from" : "worldVertices/capital-algiers", - "_to" : "worldVertices/country-algeria", - "_rev" : "_YOn1oNO--_", - "type" : "is-in" - }, - { - "_key" : "131634", - "_id" : "worldEdges/131634", - "_from" : 
"worldVertices/country-chile", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oNC--H", - "type" : "is-in" - }, - { - "_key" : "131757", - "_id" : "worldEdges/131757", - "_from" : "worldVertices/capital-prague", - "_to" : "worldVertices/country-czech-republic", - "_rev" : "_YOn1oNm--_", - "type" : "is-in" - }, - { - "_key" : "131616", - "_id" : "worldEdges/131616", - "_from" : "worldVertices/country-burkina-faso", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oN---H", - "type" : "is-in" - }, - { - "_key" : "131784", - "_id" : "worldEdges/131784", - "_from" : "worldVertices/capital-yamoussoukro", - "_to" : "worldVertices/country-cote-d-ivoire", - "_rev" : "_YOn1oNq--F", - "type" : "is-in" - }, - { - "_key" : "131556", - "_id" : "worldEdges/131556", - "_from" : "worldVertices/country-albania", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oMy--_", - "type" : "is-in" - }, - { - "_key" : "131727", - "_id" : "worldEdges/131727", - "_from" : "worldVertices/capital-kabul", - "_to" : "worldVertices/country-afghanistan", - "_rev" : "_YOn1oNe--D", - "type" : "is-in" - }, - { - "_key" : "131637", - "_id" : "worldEdges/131637", - "_from" : "worldVertices/country-colombia", - "_to" : "worldVertices/continent-south-america", - "_rev" : "_YOn1oNC--J", - "type" : "is-in" - }, - { - "_key" : "131724", - "_id" : "worldEdges/131724", - "_from" : "worldVertices/capital-helsinki", - "_to" : "worldVertices/country-finland", - "_rev" : "_YOn1oNe--B", - "type" : "is-in" - }, - { - "_key" : "131640", - "_id" : "worldEdges/131640", - "_from" : "worldVertices/country-cote-d-ivoire", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oNG--_", - "type" : "is-in" - }, - { - "_key" : "131625", - "_id" : "worldEdges/131625", - "_from" : "worldVertices/country-cameroon", - "_to" : "worldVertices/continent-africa", - "_rev" : "_YOn1oNC--B", - "type" : "is-in" - }, - { - "_key" : "131610", - "_id" : "worldEdges/131610", - "_from" : "worldVertices/country-brunei", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oN---D", - "type" : "is-in" - }, - { - "_key" : "131781", - "_id" : "worldEdges/131781", - "_from" : "worldVertices/capital-vienna", - "_to" : "worldVertices/country-austria", - "_rev" : "_YOn1oNq--D", - "type" : "is-in" - }, - { - "_key" : "131772", - "_id" : "worldEdges/131772", - "_from" : "worldVertices/capital-sofia", - "_to" : "worldVertices/country-bulgaria", - "_rev" : "_YOn1oNm--J", - "type" : "is-in" - }, - { - "_key" : "131730", - "_id" : "worldEdges/131730", - "_from" : "worldVertices/capital-la-paz", - "_to" : "worldVertices/country-bolivia", - "_rev" : "_YOn1oNe--F", - "type" : "is-in" - }, - { - "_key" : "131595", - "_id" : "worldEdges/131595", - "_from" : "worldVertices/country-bhutan", - "_to" : "worldVertices/continent-asia", - "_rev" : "_YOn1oM6--D", - "type" : "is-in" - }, - { - "_key" : "131613", - "_id" : "worldEdges/131613", - "_from" : "worldVertices/country-bulgaria", - "_to" : "worldVertices/continent-europe", - "_rev" : "_YOn1oN---F", - "type" : "is-in" - } -] -arangosh> examples.dropGraph("worldCountry"); -arangosh> var g = examples.loadGraph("worldCountryUnManaged"); -arangosh> examples.dropGraph("worldCountryUnManaged"); diff --git a/Documentation/Examples/job_cancel.generated b/Documentation/Examples/job_cancel.generated deleted file mode 100644 index 6ecb3694c012..000000000000 --- a/Documentation/Examples/job_cancel.generated +++ /dev/null @@ -1,38 +0,0 @@ -shell> curl -X POST --header 'x-arango-async: store' 
--header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF -{ - "query" : "FOR i IN 1..10 FOR j IN 1..10 LET x = sleep(1.0) FILTER i == 5 && j == 5 RETURN 42" -} -EOF - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132268 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/pending - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - "132268" -] -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132268/cancel - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : true -} -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/pending - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - "132268" -] diff --git a/Documentation/Examples/job_delete_01.generated b/Documentation/Examples/job_delete_01.generated deleted file mode 100644 index 34cba5af95a6..000000000000 --- a/Documentation/Examples/job_delete_01.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132271 -x-content-type-options: nosniff - -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/job/all - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : true -} diff --git a/Documentation/Examples/job_delete_02.generated b/Documentation/Examples/job_delete_02.generated deleted file mode 100644 index ac710108975d..000000000000 --- a/Documentation/Examples/job_delete_02.generated +++ /dev/null @@ -1,34 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132273 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_admin/time - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "time" : 1550658808.1522949, - "error" : false, - "code" : 200 -} -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/job/expired?stamp=1550658808.1522949 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : true -} -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/pending - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ ] diff --git a/Documentation/Examples/job_delete_03.generated b/Documentation/Examples/job_delete_03.generated deleted file mode 100644 index 570f4dbcb275..000000000000 --- a/Documentation/Examples/job_delete_03.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132275 -x-content-type-options: nosniff - -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132275 - 
-HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : true -} diff --git a/Documentation/Examples/job_delete_04.generated b/Documentation/Examples/job_delete_04.generated deleted file mode 100644 index 90b2b09a5d8f..000000000000 --- a/Documentation/Examples/job_delete_04.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/job/AreYouThere - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "not found", - "code" : 404, - "errorNum" : 404 -} diff --git a/Documentation/Examples/job_fetch_result_01.generated b/Documentation/Examples/job_fetch_result_01.generated deleted file mode 100644 index ae5cb3832474..000000000000 --- a/Documentation/Examples/job_fetch_result_01.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "bad parameter", - "code" : 400, - "errorNum" : 400 -} diff --git a/Documentation/Examples/job_fetch_result_02.generated b/Documentation/Examples/job_fetch_result_02.generated deleted file mode 100644 index 4917b074b136..000000000000 --- a/Documentation/Examples/job_fetch_result_02.generated +++ /dev/null @@ -1,12 +0,0 @@ -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job/notthere - -HTTP/1.1 Not Found -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "not found", - "code" : 404, - "errorNum" : 404 -} diff --git a/Documentation/Examples/job_fetch_result_03.generated b/Documentation/Examples/job_fetch_result_03.generated deleted file mode 100644 index bcb31520b18b..000000000000 --- a/Documentation/Examples/job_fetch_result_03.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132280 -x-content-type-options: nosniff - -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132280 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-arango-async-id: 132280 -x-content-type-options: nosniff - -{ - "server" : "arango", - "version" : "3.5.0-devel", - "license" : "enterprise" -} diff --git a/Documentation/Examples/job_fetch_result_04.generated b/Documentation/Examples/job_fetch_result_04.generated deleted file mode 100644 index 72cdf07e2a80..000000000000 --- a/Documentation/Examples/job_fetch_result_04.generated +++ /dev/null @@ -1,24 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF -{ - "name" : " this name is invalid " -} -EOF - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132282 -x-content-type-options: nosniff - -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132282 - -HTTP/1.1 Bad Request -content-type: application/json; charset=utf-8 -x-arango-async-id: 132282 -x-content-type-options: nosniff - -{ - "error" : true, - "errorMessage" : "expected PUT 
/_api/collection/<collection-name>/<action>", - "code" : 400, - "errorNum" : 400 -} diff --git a/Documentation/Examples/job_getByType_01.generated b/Documentation/Examples/job_getByType_01.generated deleted file mode 100644 index e7af70acab13..000000000000 --- a/Documentation/Examples/job_getByType_01.generated +++ /dev/null @@ -1,16 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132284 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/done - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - "132284" -] diff --git a/Documentation/Examples/job_getByType_02.generated b/Documentation/Examples/job_getByType_02.generated deleted file mode 100644 index 11abfd9db8d3..000000000000 --- a/Documentation/Examples/job_getByType_02.generated +++ /dev/null @@ -1,14 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132286 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/pending - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ ] diff --git a/Documentation/Examples/job_getByType_03.generated b/Documentation/Examples/job_getByType_03.generated deleted file mode 100644 index 0efcf76e08c6..000000000000 --- a/Documentation/Examples/job_getByType_03.generated +++ /dev/null @@ -1,34 +0,0 @@ -shell> curl -X POST --header 'x-arango-async: store' --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "read" : [ - "_frontend" - ] - }, - "action" : "function () {require('internal').sleep(15.0);}" -} -EOF - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132288 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/pending - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -[ - "132288" -] -shell> curl -X DELETE --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132288 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-content-type-options: nosniff - -{ - "result" : true -} diff --git a/Documentation/Examples/job_getStatusById_01.generated b/Documentation/Examples/job_getStatusById_01.generated deleted file mode 100644 index 02672c7fb194..000000000000 --- a/Documentation/Examples/job_getStatusById_01.generated +++ /dev/null @@ -1,19 +0,0 @@ -shell> curl -X PUT --header 'x-arango-async: store' --header 'accept: application/json' --dump - http://localhost:8529/_api/version - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132291 -x-content-type-options: nosniff - -shell> curl -X PUT --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132291 - -HTTP/1.1 OK -content-type: application/json; charset=utf-8 -x-arango-async-id: 132291 -x-content-type-options: nosniff - -{ - "server" : "arango", - "version" : "3.5.0-devel", - "license" : "enterprise" -} diff --git a/Documentation/Examples/job_getStatusById_02.generated 
b/Documentation/Examples/job_getStatusById_02.generated deleted file mode 100644 index 67eb40de94e4..000000000000 --- a/Documentation/Examples/job_getStatusById_02.generated +++ /dev/null @@ -1,22 +0,0 @@ -shell> curl -X POST --header 'x-arango-async: store' --header 'accept: application/json' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF -{ - "collections" : { - "read" : [ - "_frontend" - ] - }, - "action" : "function () {require('internal').sleep(15.0);}" -} -EOF - -HTTP/1.1 Accepted -content-type: text/plain; charset=utf-8 -x-arango-async-id: 132293 -x-content-type-options: nosniff - -shell> curl --header 'accept: application/json' --dump - http://localhost:8529/_api/job/132293 - -HTTP/1.1 No Content -content-type: text/plain; charset=utf-8 -x-content-type-options: nosniff - diff --git a/Documentation/Examples/joinTuples.generated b/Documentation/Examples/joinTuples.generated deleted file mode 100644 index 8df64e1763f7..000000000000 --- a/Documentation/Examples/joinTuples.generated +++ /dev/null @@ -1,53 +0,0 @@ -@Q: - FOR u IN users - FILTER u.active == true - LIMIT 0, 4 - FOR f IN relations - FILTER f.type == @friend && f.friendOf == u.userId - RETURN { - "user" : u.name, - "friendId" : f.thisUser - } -@B -{ - "friend": "friend" -} -@R -[ - { - "user": "Abigail", - "friendId": 3 - }, - { - "user": "Abigail", - "friendId": 2 - }, - { - "user": "Abigail", - "friendId": 4 - }, - { - "user": "Mary", - "friendId": 4 - }, - { - "user": "Mary", - "friendId": 1 - }, - { - "user": "Mariah", - "friendId": 1 - }, - { - "user": "Mariah", - "friendId": 2 - }, - { - "user": "Fred", - "friendId": 2 - }, - { - "user": "Fred", - "friendId": 5 - } -] \ No newline at end of file diff --git a/Documentation/Examples/lastExpressionResult.generated b/Documentation/Examples/lastExpressionResult.generated deleted file mode 100644 index 1fc8976d8b38..000000000000 --- a/Documentation/Examples/lastExpressionResult.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> 42 * 23 -966 diff --git a/Documentation/Examples/lastExpressionResultCaptured.generated b/Documentation/Examples/lastExpressionResultCaptured.generated deleted file mode 100644 index a300df980568..000000000000 --- a/Documentation/Examples/lastExpressionResultCaptured.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> var calculationResult = 42 * 23 diff --git a/Documentation/Examples/listCurrentConfigOpts.generated b/Documentation/Examples/listCurrentConfigOpts.generated deleted file mode 100644 index c9e38f6edd29..000000000000 --- a/Documentation/Examples/listCurrentConfigOpts.generated +++ /dev/null @@ -1,279 +0,0 @@ -arangosh> db._executeTransaction({ collections: {}, action: function() {return require("internal").options(); } }) -{ - "check-configuration" : false, - "config" : "none", - "configuration" : "none", - "console" : false, - "daemon" : false, - "default-language" : "en_US", - "define" : [ ], - "dump-dependencies" : false, - "dump-options" : false, - "fortune" : false, - "gid" : "", - "hund" : false, - "log" : [ - "info" - ], - "pid-file" : "", - "supervisor" : false, - "uid" : "", - "version" : false, - "working-directory" : "/var/tmp", - "agency.activate" : false, - "agency.compaction-keep-size" : 50000, - "agency.compaction-step-size" : 1000, - "agency.disaster-recovery-id" : "", - "agency.election-timeout-max" : 5, - "agency.election-timeout-min" : 1, - "agency.endpoint" : [ ], - "agency.max-append-size" : 250, - "agency.my-address" : "", - "agency.pool-size" : 1, - "agency.size" : 1, - "agency.supervision" : 
false, - "agency.supervision-frequency" : 1, - "agency.supervision-grace-period" : 10, - "agency.wait-for-sync" : true, - "arangosearch.threads" : 0, - "arangosearch.threads-limit" : 0, - "audit.hostname" : "", - "audit.output" : [ ], - "cache.rebalancing-interval" : 2000000, - "cache.size" : 3637388288, - "cluster.agency-endpoint" : [ ], - "cluster.agency-prefix" : "", - "cluster.create-waits-for-sync-replication" : true, - "cluster.index-create-timeout" : 3600, - "cluster.my-address" : "", - "cluster.my-advertised-endpoint" : "", - "cluster.my-role" : "", - "cluster.require-persisted-id" : false, - "cluster.synchronous-replication-timeout-factor" : 1, - "cluster.synchronous-replication-timeout-per-4k" : 0.1, - "cluster.system-replication-factor" : 2, - "compaction.db-sleep-time" : 1, - "compaction.dead-documents-threshold" : 16384, - "compaction.dead-size-percent-threshold" : 0.1, - "compaction.dead-size-threshold" : 131072, - "compaction.max-file-size-factor" : 3, - "compaction.max-files" : 3, - "compaction.max-result-file-size" : 134217728, - "compaction.min-interval" : 10, - "compaction.min-small-data-file-size" : 131072, - "database.auto-upgrade" : false, - "database.check-version" : false, - "database.directory" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data", - "database.force-sync-properties" : true, - "database.ignore-datafile-errors" : false, - "database.init-database" : false, - "database.maximal-journal-size" : 33554432, - "database.password" : "", - "database.required-directory-state" : "any", - "database.restore-admin" : false, - "database.throw-collection-not-loaded-error" : false, - "database.upgrade-check" : true, - "database.wait-for-sync" : false, - "foxx.queues" : true, - "foxx.queues-poll-interval" : 1, - "frontend.proxy-request-check" : true, - "frontend.trusted-proxy" : [ ], - "frontend.version-check" : true, - "http.allow-method-override" : false, - "http.hide-product-header" : false, - "http.keep-alive-timeout" : 300, - "http.trusted-origin" : [ ], - "javascript.allow-admin-execute" : false, - "javascript.app-path" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/apps", - "javascript.copy-installation" : false, - "javascript.enabled" : true, - "javascript.gc-frequency" : 60, - "javascript.gc-interval" : 2000, - "javascript.module-directory" : [ - "/home/steemann/ArangoNoAsan/enterprise/js" - ], - "javascript.script" : [ ], - "javascript.script-parameter" : [ ], - "javascript.startup-directory" : "/home/steemann/ArangoNoAsan/js", - "javascript.v8-contexts" : 16, - "javascript.v8-contexts-max-age" : 60, - "javascript.v8-contexts-max-invocations" : 0, - "javascript.v8-contexts-minimum" : 1, - "javascript.v8-max-heap" : 3072, - "javascript.v8-options" : [ ], - "ldap.async-connect" : false, - "ldap.basedn" : "", - "ldap.binddn" : "", - "ldap.debug" : false, - "ldap.enabled" : false, - "ldap.network-timeout" : 0, - "ldap.port" : 389, - "ldap.prefix" : "", - "ldap.referrals" : false, - "ldap.refresh-rate" : 300, - "ldap.restart" : false, - "ldap.retries" : 1, - "ldap.roles-attribute-name" : "", - "ldap.roles-exclude" : "", - "ldap.roles-include" : "", - "ldap.roles-search" : "", - "ldap.roles-transformation" : [ ], - "ldap.search-attribute" : "uid", - "ldap.search-filter" : "objectClass=*", - "ldap.search-scope" : "sub", - "ldap.serialize-timeout" : 5, - "ldap.serialized" : false, - "ldap.server" : "", - "ldap.suffix" : "", - "ldap.superuser-role" : "", - "ldap.timeout" : 0, - "ldap.tls" : false, - "ldap.tls-cacert-dir" : "", - "ldap.tls-cacert-file" : "", - 
"ldap.tls-cert-check-strategy" : "hard", - "ldap.tls-version" : "1.2", - "ldap.url" : "", - "log.color" : true, - "log.escape" : true, - "log.file" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/log", - "log.force-direct" : false, - "log.foreground-tty" : true, - "log.keep-logrotate" : false, - "log.level" : [ - "info" - ], - "log.line-number" : false, - "log.output" : [ - "file:///tmp/arangosh_uprJb4/tmp-27793-56941049/log" - ], - "log.performance" : false, - "log.prefix" : "", - "log.request-parameters" : true, - "log.role" : false, - "log.shorten-filenames" : true, - "log.thread" : false, - "log.thread-name" : false, - "log.use-local-time" : false, - "log.use-microtime" : false, - "nonce.size" : 4194304, - "query.cache-entries" : 128, - "query.cache-entries-max-size" : 268435456, - "query.cache-entry-max-size" : 16777216, - "query.cache-include-system-collections" : false, - "query.cache-mode" : "off", - "query.fail-on-warning" : false, - "query.memory-limit" : 0, - "query.optimizer-max-plans" : 128, - "query.registry-ttl" : 600, - "query.slow-streaming-threshold" : 10, - "query.slow-threshold" : 10, - "query.tracking" : true, - "query.tracking-with-bindvars" : true, - "random.generator" : 1, - "replication.active-failover" : false, - "replication.auto-start" : true, - "replication.automatic-failover" : false, - "rocksdb.block-align-data-blocks" : false, - "rocksdb.block-cache-shard-bits" : -1, - "rocksdb.block-cache-size" : 4364865945, - "rocksdb.compaction-read-ahead-size" : 2097152, - "rocksdb.debug-logging" : false, - "rocksdb.delayed_write_rate" : 0, - "rocksdb.dynamic-level-bytes" : true, - "rocksdb.enable-pipelined-write" : false, - "rocksdb.enable-statistics" : false, - "rocksdb.encryption-key-generator" : "", - "rocksdb.encryption-keyfile" : "", - "rocksdb.enforce-block-cache-size-limit" : false, - "rocksdb.intermediate-commit-count" : 1000000, - "rocksdb.intermediate-commit-size" : 536870912, - "rocksdb.level0-compaction-trigger" : 2, - "rocksdb.level0-slowdown-trigger" : 20, - "rocksdb.level0-stop-trigger" : 36, - "rocksdb.max-background-jobs" : 8, - "rocksdb.max-bytes-for-level-base" : 268435456, - "rocksdb.max-bytes-for-level-multiplier" : 10, - "rocksdb.max-subcompactions" : 0, - "rocksdb.max-total-wal-size" : 83886080, - "rocksdb.max-transaction-size" : 18446744073709552000, - "rocksdb.max-write-buffer-number" : 2, - "rocksdb.min-write-buffer-number-to-merge" : 1, - "rocksdb.num-levels" : 7, - "rocksdb.num-threads-priority-high" : 4, - "rocksdb.num-threads-priority-low" : 4, - "rocksdb.num-uncompressed-levels" : 2, - "rocksdb.optimize-filters-for-hits" : false, - "rocksdb.recycle-log-file-num" : 0, - "rocksdb.sync-interval" : 100, - "rocksdb.table-block-size" : 16384, - "rocksdb.throttle" : true, - "rocksdb.total-write-buffer-size" : 5819821260, - "rocksdb.transaction-lock-timeout" : 1000, - "rocksdb.use-direct-io-for-flush-and-compaction" : false, - "rocksdb.use-direct-reads" : false, - "rocksdb.use-file-logging" : false, - "rocksdb.use-fsync" : false, - "rocksdb.wal-directory" : "", - "rocksdb.wal-file-timeout" : 10, - "rocksdb.wal-file-timeout-initial" : 180, - "rocksdb.wal-recovery-skip-corrupted" : false, - "rocksdb.write-buffer-size" : 67108864, - "server.allow-use-database" : false, - "server.authentication" : false, - "server.authentication-system-only" : true, - "server.authentication-timeout" : 0, - "server.authentication-unix-sockets" : true, - "server.check-max-memory-mappings" : true, - "server.descriptors-minimum" : 0, - "server.endpoint" : [ - 
"tcp://127.0.0.1:18836" - ], - "server.flush-interval" : 1000000, - "server.gid" : "", - "server.io-threads" : 2, - "server.jwt-secret" : "", - "server.jwt-secret-keyfile" : "", - "server.local-authentication" : true, - "server.maintenance-actions-block" : 2, - "server.maintenance-actions-linger" : 3600, - "server.maintenance-threads" : 3, - "server.maximal-queue-size" : 4096, - "server.maximal-threads" : 64, - "server.minimal-threads" : 2, - "server.prio1-size" : 1048576, - "server.rest-server" : true, - "server.scheduler-queue-size" : 128, - "server.statistics" : true, - "server.storage-engine" : "mmfiles", - "server.uid" : "", - "ssl.cafile" : "", - "ssl.cipher-list" : "HIGH:!EXPORT:!aNULL@STRENGTH", - "ssl.ecdh-curve" : "prime256v1", - "ssl.keyfile" : "", - "ssl.options" : 2147485780, - "ssl.protocol" : 5, - "ssl.require-peer-certificate" : false, - "ssl.session-cache" : false, - "tcp.backlog-size" : 64, - "tcp.reuse-address" : true, - "temp.path" : "", - "ttl.frequency" : 30000, - "ttl.max-collection-removes" : 1000000, - "ttl.max-total-removes" : 1000000, - "ttl.only-loaded-collection" : true, - "vst.maxsize" : 30720, - "wal.allow-oversize-entries" : true, - "wal.directory" : "/tmp/arangosh_uprJb4/tmp-27793-56941049/data/journals/", - "wal.flush-timeout" : 15000, - "wal.historic-logfiles" : 10, - "wal.ignore-logfile-errors" : false, - "wal.ignore-recovery-errors" : false, - "wal.logfile-size" : 33554432, - "wal.open-logfiles" : 0, - "wal.reserve-logfiles" : 3, - "wal.slots" : 1048576, - "wal.sync-interval" : 100000, - "wal.throttle-wait" : 15000, - "wal.throttle-when-pending" : 0, - "wal.use-mlock" : false -} diff --git a/Documentation/Examples/loadIndexesIntoMemory.generated b/Documentation/Examples/loadIndexesIntoMemory.generated deleted file mode 100644 index 52437c585daf..000000000000 --- a/Documentation/Examples/loadIndexesIntoMemory.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> db.example.loadIndexesIntoMemory(); -{ - "result" : true -} diff --git a/Documentation/Examples/printFunction.generated b/Documentation/Examples/printFunction.generated deleted file mode 100644 index 42f5e31af5c1..000000000000 --- a/Documentation/Examples/printFunction.generated +++ /dev/null @@ -1,10 +0,0 @@ -arangosh> print({ a: "123", b: [1,2,3], c: "test" }); -{ - "a" : "123", - "b" : [ - 1, - 2, - 3 - ], - "c" : "test" -} diff --git a/Documentation/Examples/queryLimit.generated b/Documentation/Examples/queryLimit.generated deleted file mode 100644 index bb9fee92e480..000000000000 --- a/Documentation/Examples/queryLimit.generated +++ /dev/null @@ -1,48 +0,0 @@ -arangosh> db.five.all().toArray(); -[ - { - "_key" : "132394", - "_id" : "five/132394", - "_rev" : "_YOn1p-y--B", - "name" : "five" - }, - { - "_key" : "132385", - "_id" : "five/132385", - "_rev" : "_YOn1p-u--B", - "name" : "two" - }, - { - "_key" : "132391", - "_id" : "five/132391", - "_rev" : "_YOn1p-y--_", - "name" : "four" - }, - { - "_key" : "132388", - "_id" : "five/132388", - "_rev" : "_YOn1p-u--D", - "name" : "three" - }, - { - "_key" : "132381", - "_id" : "five/132381", - "_rev" : "_YOn1p-u--_", - "name" : "one" - } -] -arangosh> db.five.all().limit(2).toArray(); -[ - { - "_key" : "132394", - "_id" : "five/132394", - "_rev" : "_YOn1p-y--B", - "name" : "five" - }, - { - "_key" : "132385", - "_id" : "five/132385", - "_rev" : "_YOn1p-u--B", - "name" : "two" - } -] diff --git a/Documentation/Examples/querySkip.generated b/Documentation/Examples/querySkip.generated deleted file mode 100644 index ca5f498554af..000000000000 --- 
a/Documentation/Examples/querySkip.generated +++ /dev/null @@ -1,48 +0,0 @@ -arangosh> db.five.all().toArray(); -[ - { - "_key" : "132416", - "_id" : "five/132416", - "_rev" : "_YOn1p_y--B", - "name" : "two" - }, - { - "_key" : "132412", - "_id" : "five/132412", - "_rev" : "_YOn1p_y--_", - "name" : "one" - }, - { - "_key" : "132425", - "_id" : "five/132425", - "_rev" : "_YOn1p_2--_", - "name" : "five" - }, - { - "_key" : "132422", - "_id" : "five/132422", - "_rev" : "_YOn1p_y--F", - "name" : "four" - }, - { - "_key" : "132419", - "_id" : "five/132419", - "_rev" : "_YOn1p_y--D", - "name" : "three" - } -] -arangosh> db.five.all().skip(3).toArray(); -[ - { - "_key" : "132422", - "_id" : "five/132422", - "_rev" : "_YOn1p_y--F", - "name" : "four" - }, - { - "_key" : "132419", - "_id" : "five/132419", - "_rev" : "_YOn1p_y--D", - "name" : "three" - } -] diff --git a/Documentation/Examples/shellHelp.generated b/Documentation/Examples/shellHelp.generated deleted file mode 100644 index 660e93c539b9..000000000000 --- a/Documentation/Examples/shellHelp.generated +++ /dev/null @@ -1,41 +0,0 @@ -arangosh> db._help(); - ---------------------------- ArangoDatabase (db) help --------------------------- -Administration Functions: - _help() this help - _flushCache() flush and refill collection cache - -Collection Functions: - _collections() list all collections - _collection(<name>) get collection by identifier/name - _create(<name>, <properties>) creates a new collection - _createEdgeCollection(<name>) creates a new edge collection - _drop(<name>) delete a collection - -Document Functions: - _document(<id>) get document by handle (_id) - _replace(<id>, <data>, <overwrite>) overwrite document - _update(<id>, <data>, <overwrite>, partially update document - <keepNull>) - _remove(<id>) delete document - _exists(<id>) checks whether a document exists - _truncate() delete all documents - -Database Management Functions: - _createDatabase(<name>) creates a new database - _dropDatabase(<name>) drops an existing database - _useDatabase(<name>) switches into an existing database - _drop(<name>) delete a collection - _name() name of the current database - -Query / Transaction Functions: - _executeTransaction(<transaction>) execute transaction - _query(<query>) execute AQL query - _createStatement(<data>) create and return AQL query - -View Functions: - _views() list all views - _view(<name>) get view by name - _createView(<name>, <type>, creates a new view - <properties>) - _dropView(<name>) delete a view diff --git a/Documentation/Examples/shellPaste.generated b/Documentation/Examples/shellPaste.generated deleted file mode 100644 index 0a2298f7a130..000000000000 --- a/Documentation/Examples/shellPaste.generated +++ /dev/null @@ -1,23 +0,0 @@ -arangosh> for (var i = 0; i < 10; i ++) { -........> require("@arangodb").print("Hello world " + i + "!\n"); -........> } -Hello world 0! - -Hello world 1! - -Hello world 2! - -Hello world 3! - -Hello world 4! - -Hello world 5! - -Hello world 6! - -Hello world 7! - -Hello world 8! - -Hello world 9! 
- diff --git a/Documentation/Examples/shellUseDB.generated b/Documentation/Examples/shellUseDB.generated deleted file mode 100644 index 6f77e4bc14ed..000000000000 --- a/Documentation/Examples/shellUseDB.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> db._createDatabase("myapp"); -true -arangosh> db._useDatabase("myapp"); -true -arangosh> db._useDatabase("_system"); -true -arangosh> db._dropDatabase("myapp"); -true diff --git a/Documentation/Examples/usingToArray.generated b/Documentation/Examples/usingToArray.generated deleted file mode 100644 index 8355a9cc6cfa..000000000000 --- a/Documentation/Examples/usingToArray.generated +++ /dev/null @@ -1,36 +0,0 @@ -arangosh> db._create("five") -[ArangoCollection 132471, "five" (type document, status loaded)] -arangosh> for (i = 0; i < 5; i++) db.five.save({value:i}) -arangosh> db.five.toArray() -[ - { - "_key" : "132482", - "_id" : "five/132482", - "_rev" : "_YOn1pYi--B", - "value" : 1 - }, - { - "_key" : "132491", - "_id" : "five/132491", - "_rev" : "_YOn1pYm--D", - "value" : 4 - }, - { - "_key" : "132478", - "_id" : "five/132478", - "_rev" : "_YOn1pYi--_", - "value" : 0 - }, - { - "_key" : "132485", - "_id" : "five/132485", - "_rev" : "_YOn1pYm--_", - "value" : 2 - }, - { - "_key" : "132488", - "_id" : "five/132488", - "_rev" : "_YOn1pYm--B", - "value" : 3 - } -] diff --git a/Documentation/Examples/viewDatabaseCreate.generated b/Documentation/Examples/viewDatabaseCreate.generated deleted file mode 100644 index 42e1168a3d4d..000000000000 --- a/Documentation/Examples/viewDatabaseCreate.generated +++ /dev/null @@ -1,18 +0,0 @@ -arangosh> v = db._createView("example", "arangosearch"); -[ArangoView 132500, "example" (type arangosearch)] -arangosh> v.properties() -{ - "writebufferIdle" : 64, - "writebufferActive" : 0, - "writebufferSizeMax" : 33554432, - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "cleanupIntervalStep" : 10, - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} -arangosh> db._dropView("example") diff --git a/Documentation/Examples/viewDatabaseDrop.generated b/Documentation/Examples/viewDatabaseDrop.generated deleted file mode 100644 index f741bd4f3801..000000000000 --- a/Documentation/Examples/viewDatabaseDrop.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> db._createView("exampleView", "arangosearch"); -[ArangoView 132507, "exampleView" (type arangosearch)] -arangosh> db._dropView("exampleView"); -arangosh> db._view("exampleView"); -null diff --git a/Documentation/Examples/viewDatabaseGet.generated b/Documentation/Examples/viewDatabaseGet.generated deleted file mode 100644 index 1449a5a47948..000000000000 --- a/Documentation/Examples/viewDatabaseGet.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> view = db._view("example"); -........> // or, alternatively -[ArangoView 132512, "example" (type arangosearch)] -arangosh> view = db["example"] -[ArangoView 132512, "example" (type arangosearch)] diff --git a/Documentation/Examples/viewDatabaseList.generated b/Documentation/Examples/viewDatabaseList.generated deleted file mode 100644 index 276867017412..000000000000 --- a/Documentation/Examples/viewDatabaseList.generated +++ /dev/null @@ -1,5 +0,0 @@ -arangosh> db._views(); -[ - [ArangoView 102, "demoView" (type arangosearch)], - [ArangoView 132517, "exampleView" (type arangosearch)] -] diff --git a/Documentation/Examples/viewDatabaseNameKnown.generated b/Documentation/Examples/viewDatabaseNameKnown.generated deleted file mode 100644 index 
f548853f8f6a..000000000000 --- a/Documentation/Examples/viewDatabaseNameKnown.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._view("demoView"); -[ArangoView 102, "demoView" (type arangosearch)] diff --git a/Documentation/Examples/viewDatabaseNameUnknown.generated b/Documentation/Examples/viewDatabaseNameUnknown.generated deleted file mode 100644 index 4664f57060d9..000000000000 --- a/Documentation/Examples/viewDatabaseNameUnknown.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> db._view("unknown"); -null diff --git a/Documentation/Examples/viewDrop.generated b/Documentation/Examples/viewDrop.generated deleted file mode 100644 index a5f504812faf..000000000000 --- a/Documentation/Examples/viewDrop.generated +++ /dev/null @@ -1,8 +0,0 @@ -arangosh> v = db._createView("example", "arangosearch"); -........> // or -[ArangoView 132526, "example" (type arangosearch)] -arangosh> v = db._view("example"); -[ArangoView 132526, "example" (type arangosearch)] -arangosh> v.drop(); -arangosh> db._view("example"); -null diff --git a/Documentation/Examples/viewGetProperties.generated b/Documentation/Examples/viewGetProperties.generated deleted file mode 100644 index f2fdd5488185..000000000000 --- a/Documentation/Examples/viewGetProperties.generated +++ /dev/null @@ -1,17 +0,0 @@ -arangosh> v = db._view("demoView"); -[ArangoView 102, "demoView" (type arangosearch)] -arangosh> v.properties(); -{ - "writebufferIdle" : 64, - "writebufferActive" : 0, - "writebufferSizeMax" : 33554432, - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "cleanupIntervalStep" : 10, - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} diff --git a/Documentation/Examples/viewModifyProperties.generated b/Documentation/Examples/viewModifyProperties.generated deleted file mode 100644 index fb3e55ccbb29..000000000000 --- a/Documentation/Examples/viewModifyProperties.generated +++ /dev/null @@ -1,75 +0,0 @@ -arangosh> v = db._view("example"); -[ArangoView 132534, "example" (type arangosearch)] -arangosh> v.properties(); -........> // set cleanupIntervalStep to 12 -{ - "writebufferIdle" : 64, - "writebufferActive" : 0, - "writebufferSizeMax" : 33554432, - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "cleanupIntervalStep" : 10, - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} -arangosh> v.properties({cleanupIntervalStep: 12}); -........> // add a link -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} -arangosh> v.properties({links: {demo: {}}}) -........> // remove a link -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - "demo" : { - "analyzers" : [ - "identity" - ], - "fields" : { - }, - "includeAllFields" : false, - "storeValues" : "none", - "trackListPositions" : false - } - } -} -arangosh> v.properties({links: {demo: null}}) -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : 
{ - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/viewName.generated b/Documentation/Examples/viewName.generated deleted file mode 100644 index cd74decb740c..000000000000 --- a/Documentation/Examples/viewName.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> v = db._view("demoView"); -[ArangoView 102, "demoView" (type arangosearch)] -arangosh> v.name(); -demoView diff --git a/Documentation/Examples/viewRename.generated b/Documentation/Examples/viewRename.generated deleted file mode 100644 index e9fccc3d9580..000000000000 --- a/Documentation/Examples/viewRename.generated +++ /dev/null @@ -1,7 +0,0 @@ -arangosh> v = db._createView("example", "arangosearch"); -[ArangoView 132558, "example" (type arangosearch)] -arangosh> v.name(); -example -arangosh> v.rename("exampleRenamed"); -arangosh> v.name(); -exampleRenamed diff --git a/Documentation/Examples/viewType.generated b/Documentation/Examples/viewType.generated deleted file mode 100644 index 8a037e36da70..000000000000 --- a/Documentation/Examples/viewType.generated +++ /dev/null @@ -1,4 +0,0 @@ -arangosh> v = db._view("demoView"); -[ArangoView 102, "demoView" (type arangosearch)] -arangosh> v.type(); -arangosearch diff --git a/Documentation/Examples/viewUsage_01.generated b/Documentation/Examples/viewUsage_01.generated deleted file mode 100644 index e1bf614f58f4..000000000000 --- a/Documentation/Examples/viewUsage_01.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> view = db._createView("myView", "arangosearch", {}); -[ArangoView 132579, "myView" (type arangosearch)] diff --git a/Documentation/Examples/viewUsage_02.generated b/Documentation/Examples/viewUsage_02.generated deleted file mode 100644 index 0f215bca1afa..000000000000 --- a/Documentation/Examples/viewUsage_02.generated +++ /dev/null @@ -1,2 +0,0 @@ -arangosh> view = db._view("myView"); -[ArangoView 132579, "myView" (type arangosearch)] diff --git a/Documentation/Examples/viewUsage_03.generated b/Documentation/Examples/viewUsage_03.generated deleted file mode 100644 index e9877012a1e7..000000000000 --- a/Documentation/Examples/viewUsage_03.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> view.properties(); -{ - "writebufferIdle" : 64, - "writebufferActive" : 0, - "writebufferSizeMax" : 33554432, - "commitIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "cleanupIntervalStep" : 10, - "links" : { - }, - "consolidationIntervalMsec" : 60000 -} diff --git a/Documentation/Examples/viewUsage_04.generated b/Documentation/Examples/viewUsage_04.generated deleted file mode 100644 index 52c81e899990..000000000000 --- a/Documentation/Examples/viewUsage_04.generated +++ /dev/null @@ -1,15 +0,0 @@ -arangosh> view.properties({cleanupIntervalStep: 12}); -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - } -} diff --git a/Documentation/Examples/viewUsage_05.generated b/Documentation/Examples/viewUsage_05.generated deleted file mode 100644 index 4e709dd18968..000000000000 --- a/Documentation/Examples/viewUsage_05.generated +++ /dev/null @@ -1,25 +0,0 @@ -arangosh> view.properties({links: {colA: {includeAllFields: 
true}}}); -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - "colA" : { - "analyzers" : [ - "identity" - ], - "fields" : { - }, - "includeAllFields" : true, - "storeValues" : "none", - "trackListPositions" : false - } - } -} diff --git a/Documentation/Examples/viewUsage_06.generated b/Documentation/Examples/viewUsage_06.generated deleted file mode 100644 index 1c3b4dd87014..000000000000 --- a/Documentation/Examples/viewUsage_06.generated +++ /dev/null @@ -1,37 +0,0 @@ -arangosh> view.properties({links: {colB: {fields: {text: {}}}}}); -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - "colA" : { - "analyzers" : [ - "identity" - ], - "fields" : { - }, - "includeAllFields" : true, - "storeValues" : "none", - "trackListPositions" : false - }, - "colB" : { - "analyzers" : [ - "identity" - ], - "fields" : { - "text" : { - } - }, - "includeAllFields" : false, - "storeValues" : "none", - "trackListPositions" : false - } - } -} diff --git a/Documentation/Examples/viewUsage_07.generated b/Documentation/Examples/viewUsage_07.generated deleted file mode 100644 index 23e6a2ad3770..000000000000 --- a/Documentation/Examples/viewUsage_07.generated +++ /dev/null @@ -1,27 +0,0 @@ -arangosh> view.properties({links: {colA: null}}); -{ - "cleanupIntervalStep" : 12, - "commitIntervalMsec" : 60000, - "consolidationIntervalMsec" : 60000, - "consolidationPolicy" : { - "type" : "bytes_accum", - "threshold" : 0.10000000149011612 - }, - "writebufferActive" : 0, - "writebufferIdle" : 64, - "writebufferSizeMax" : 33554432, - "links" : { - "colB" : { - "analyzers" : [ - "identity" - ], - "fields" : { - "text" : { - } - }, - "includeAllFields" : false, - "storeValues" : "none", - "trackListPositions" : false - } - } -} diff --git a/Documentation/Examples/viewUsage_08.generated b/Documentation/Examples/viewUsage_08.generated deleted file mode 100644 index bce6359bd6f2..000000000000 --- a/Documentation/Examples/viewUsage_08.generated +++ /dev/null @@ -1 +0,0 @@ -arangosh> db._dropView("myView"); diff --git a/Documentation/Examples/working_with_date_time.generated b/Documentation/Examples/working_with_date_time.generated deleted file mode 100644 index b34fc8a1fbd5..000000000000 --- a/Documentation/Examples/working_with_date_time.generated +++ /dev/null @@ -1,28 +0,0 @@ -arangosh> db._create("exampleTime"); -[ArangoCollection 132634, "exampleTime" (type document, status loaded)] -arangosh> var timestamps = ["2014-05-07T14:19:09.522","2014-05-07T21:19:09.522","2014-05-08T04:19:09.522","2014-05-08T11:19:09.522","2014-05-08T18:19:09.522"]; -arangosh> for (i = 0; i < 5; i++) db.exampleTime.save({value:i, ts: timestamps[i]}) -arangosh> db._query("FOR d IN exampleTime FILTER d.ts > '2014-05-07T14:19:09.522' and d.ts < '2014-05-08T18:19:09.522' RETURN d").toArray() -[ - { - "_key" : "132651", - "_id" : "exampleTime/132651", - "_rev" : "_YOn1p6q--D", - "value" : 3, - "ts" : "2014-05-08T11:19:09.522" - }, - { - "_key" : "132645", - "_id" : "exampleTime/132645", - "_rev" : "_YOn1p6q--_", - "value" : 1, - "ts" : "2014-05-07T21:19:09.522" - 
},
- {
- "_key" : "132648",
- "_id" : "exampleTime/132648",
- "_rev" : "_YOn1p6q--B",
- "value" : 2,
- "ts" : "2014-05-08T04:19:09.522"
- }
-]
diff --git a/Documentation/README_maintainers.md b/Documentation/README_maintainers.md
deleted file mode 100644
index c00e2d6cddc3..000000000000
--- a/Documentation/README_maintainers.md
+++ /dev/null
@@ -1,385 +0,0 @@
-# ArangoDB Documentation Maintainers manual
-
-- [Using Docker container](#using-docker-container)
-- [Installing on local system](#installing-on-local-system)
-- [Add / Synchronize external documentation](#add--synchronize-external-documentation)
-- [Generate users documentation](#generate-users-documentation)
-- [Using Gitbook](#using-gitbook)
-- [Examples](#examples)
-  * [Where to add new...](#where-to-add-new)
-  * [generate](#generate)
-- [write markdown](#write-markdown)
-- [Include ditaa diagrams](#include-ditaa-diagrams)
-- [Read / use the documentation](#read--use-the-documentation)
-- [arangod Example tool](#arangod-example-tool)
-  * [OUTPUT, RUN and AQL specifics](#output-run-and-aql-specifics)
-- [Swagger integration](#swagger-integration)
-  * [RESTQUERYPARAMETERS](#restqueryparameters)
-  * [RESTURLPARAMETERS](#resturlparameters)
-  * [RESTDESCRIPTION](#restdescription)
-  * [RESTRETURNCODES](#restreturncodes)
-  * [RESTREPLYBODY](#restreplybody)
-  * [RESTHEADER](#restheader)
-  * [RESTURLPARAM](#resturlparam)
-  * [RESTALLBODYPARAM](#restallbodyparam)
-  * [RESTBODYPARAM](#restbodyparam)
-  * [RESTSTRUCT](#reststruct)
-
-# Using Docker container
-
-We provide the Docker container `arangodb/documentation-builder` which brings
-all necessary dependencies to build the documentation.
-
-The files and a description of how to (re-)generate the Docker image are here:
-https://github.com/arangodb-helper/build-docker-containers/tree/master/distros/debian/jessie.docu
-
-You can automagically build it using
-
-    ./scripts/generateDocumentation.sh
-
-which will start the Docker container, compile ArangoDB, generate fresh example snippets, generate swagger, and build all gitbook-produced output files.
-
-You can also use `proselint` inside of that container to let it proofread your English ;-)
-
-# Installing on local system
-
-Dependencies to build documentation:
-
-- [swagger 2](http://swagger.io/) for the API-Documentation inside aardvark (no installation required)
-
-- [Node.js](https://nodejs.org/)
-
-  Make sure the option to *Add to PATH* is enabled.
-  After installation, the following commands should be available everywhere:
-
-  - `node`
-  - `npm`
-
-  If not, add the installation path to your environment variable PATH.
-  Gitbook requires fairly recent Node.js versions.
-
-- [Gitbook](https://github.com/GitbookIO/gitbook)
-
-  Can be installed with Node's package manager NPM:
-
-    npm install gitbook-cli -g
-
-- [ditaa (DIagrams Through Ascii Art)](http://ditaa.sourceforge.net/) to build the
-  ASCII art diagrams (optional)
-
-- Calibre2 (optional, only required if you want to build the e-book version)
-
-  http://calibre-ebook.com/download
-
-  Run the installer and follow the instructions.
-
-# Add / Synchronize external documentation
-
-We maintain the documentation of some components - be it drivers or other utilities - along with their respective git repositories,
-since they shouldn't be kept directly in sync with the ArangoDB core documentation.
-The maintainer of the respective component can alter the documentation, and once a good point in
-time is reached, it can be synced over via `Documentation/Scripts/fetchRefs.sh`, which spiders
-the `SUMMARY.md` files of all books, creates a clone of the external resource, adds a `don't edit this here` note to the files, and copies them over.
-Use your *github username* as the first parameter to clone using HTTP + authentication, or `git` if you want to use SSH + key for authentication.
-
-The `SUMMARY.md` integration syntax consists of special comment lines that contain `git` in them, in a semicolon-separated value list:
-
- - The git repository - the git repository containing the documentation - we will clone this; if authentication is required, prepend an `@` to `github.com`
- - The directory name where to clone it under `Documentation/Books/repos` (so several integration points can share a working copy)
- - Subdirectory - the sub-directory inside of the git repository to integrate, also used in the `VERSIONS` file
- - Source - may be empty if the whole Subdirectory should be mapped into the book the `SUMMARY.md` is in, else specify source files (one per line) or directories
- - Destination - may be empty if the sub-directory on the remote repo should be mapped into the book the `SUMMARY.md` is located in; else specify a file or directory.
-
-If a branch or tag other than the default branch should be checked out, you can specify it in the VERSIONS file (see the example further below). The syntax is `EXTERNAL_DOC_{the-directory-name}={remote-branch-name|tag}`
-
-If private repositories with authentication need to be cloned, the integrator can specify a username/password pair to the script. They can also create a clone in the `Documentation/Books/repos/$1` directory - where the script would clone it.
-
-The script will reset & pull the repos.
-
-For testing the user can check out other remote branches in that directory.
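-For illustration only - the branch name below is made up and not taken from the real `VERSIONS` file - pinning the `arangosync` clone directory to a specific branch would be a single line like:
-
-    EXTERNAL_DOC_arangosync=docs-3.4
-
-Here `docs-3.4` is merely a placeholder for whatever branch or tag the external repository actually provides.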
-
-Below the integration lines, regular lines referencing the integrated .md files have to be put to add them to the book's summary.
-
-Please note that the SUMMARY.md integration checks will fail if unreferenced .md files are present, or .md files are missing.
-
-The fetched .md files should be committed along with the changes to the `SUMMARY.md`.
-
-An example integrating an authenticated directory structure:
-
-    # https://@github.com/arangodb/arangosync.git;arangosync;doc-integration/Manual;;/
-    * [Datacenter to datacenter replication](Scalability/DC2DC/README.md)
-    * [Introduction](Scalability/DC2DC/Introduction.md)
-    * [Applicability](Scalability/DC2DC/Applicability.md)
-    * [Requirements](Scalability/DC2DC/Requirements.md)
-
-Another example, integrating a single README.md from an unauthenticated repo and mapping it into `Drivers/js/`:
-
-    * [Drivers](Drivers/README.md)
-    # https://github.com/arangodb/arangojs.git;arangojs;;README.md;Drivers/js/
-    * [Javascript](Drivers/js/README.md)
-
-# Generate users documentation
-
-If you've edited examples, see below how to regenerate them with
-[`./utils/generateExamples.sh`](https://github.com/arangodb/arangodb/blob/devel/utils/generateExamples.sh).
-If you've edited REST (AKA HTTP) documentation, first invoke
-[`./utils/generateSwagger.sh`](https://github.com/arangodb/arangodb/blob/devel/utils/generateSwagger.sh).
-Run `cd Documentation/Books && ./build.sh` to generate it.
-The documentation will be generated in subfolders in `arangodb/Documentation/Books/books` -
-use your favorite browser to read it.
-
-You may encounter permission problems with gitbook and its npm invocations.
-In that case, you need to run the command as root / Administrator.
-
-If you see "device busy" errors on Windows, retry. Make sure you don't have
-intermediate files open in the ppbooks / books subfolder (e.g. in a browser or editor).
-It can also temporarily occur during phases of high HDD / SSD load.
-
-The build scripts contain several sanity checks, e.g. whether all examples are
-used and no dead references exist (in that case, see building examples below).
-
-If the markdown files aren't converted to html, or `index.html` shows a single
-chapter only (content of `README.md`), make sure
-[Cygwin creates native symlinks](https://docs.arangodb.com/devel/Cookbook/Compiling/Windows.html).
-It does not if `SUMMARY.md` in `Books/ppbooks/` looks like this:
-
-    !ÿþf o o
-
-If sub-chapters do not show in the navigation, try another browser (Firefox).
-Chrome's security policies are pretty strict about localhost and the file://
-protocol. You may access the docs through a local web server to lift the
-restrictions. You can use Python's built-in HTTP server for this.
-
-    ~/books$ python -m SimpleHTTPServer 8000
-
-To only regenerate one file (faster), you may specify a filter:
-
-    make build-book NAME=Manual FILTER=Manual/Aql/Invoke.md
-
-(regular expressions allowed)
-
-# Using Gitbook
-
-The `arangodb/Documentation/Books/build.sh` script generates a website
-version of the manual.
-
-If you want to generate all media such as PDF and ePub, run `arangodb/Documentation/Books/build.sh build-dist-books` (takes some time to complete).
-
-If you want to generate only one of them, run the build commands below
-in `arangodb/Documentation/Books/books/[Manual|HTTP|AQL]/`. Calibre's
-`ebook-convert` will be used for the conversion.
-
-Generate a PDF:
-
-    gitbook pdf ./ppbooks/Manual ./target/path/filename.pdf
-
-Generate an ePub:
-
-    gitbook epub ./ppbooks/Manual ./target/path/filename.epub
-
-# Examples
-
-## Where to add new...
-
-- Documentation/DocuBlocks/* - markdown comments with execution section
-  - Documentation/Books/Aql|Cookbook|HTTP|Manual/SUMMARY.md - index of all sub-documentations
-
-## generate
-
- - `./utils/generateExamples.sh --onlyThisOne geoIndexSelect` will only produce one example - *geoIndexSelect*
 - - `./utils/generateExamples.sh --onlyThisOne 'MOD.*'` will only produce the examples matching that regex; note that
-   examples with enumerations in their name may be based on others in their series - so you should generate the whole group.
- - running `onlyThisOne` in conjunction with a pre-started server cuts down the execution time even more.
-   In addition to the `--onlyThisOne ...` specify e.g. `--server.endpoint tcp://127.0.0.1:8529` to utilize your already running arangod.
-   Please note that examples may collide with existing collections like 'test' - you need to make sure your server is clean enough.
- - you can use generateExamples like this:
-   `./utils/generateExamples.sh \
-    --server.endpoint 'tcp://127.0.0.1:8529' \
-    --withPython C:/tools/python2/python.exe \
-    --onlyThisOne 'MOD.*'`
- - `./Documentation/Scripts/allExamples.sh` generates a file where you can inspect all examples for readability.
- - `./utils/generateSwagger.sh` - run on the top level to generate the documentation interactively with the server; you may use
-   [the swagger editor](https://github.com/swagger-api/swagger-editor) to revalidate whether
-   *../../js/apps/system/_admin/aardvark/APP/api-docs.json* is accurate.
- - `cd Documentation/Books; make` - to generate the HTML documentation
-
-
-# write markdown
-
-*md* files are used for the structure. To join them with parts extracted from the program documentation
-you need to place hooks:
- - `@startDocuBlock ` is replaced by a Docublock extracted from source.
- - `@startDocuBlockInline ` till `@endDocuBlock ` - is replaced with its own evaluated content - so *@EXAMPLE_AQL | @EXAMPLE_ARANGOSH_[OUTPUT | RUN]* sections are executed
-   the same way as inside of source code documentation.
-
-# Include ditaa diagrams
-
-We use the [beautiful ditaa (DIagrams Through Ascii Art)](http://ditaa.sourceforge.net/) to generate diagrams explaining flows etc.
-in our documentation.
-
-We have e.g. `Manual/Graphs/graph_user_in_group.ditaa` which is transpiled by ditaa into a PNG file; thus you simply include
-a PNG file of the same name as an image in the markdown: `![User in group example](graph_user_in_group.png)` to reference it.
-
-# Read / use the documentation
-
- - `file:///Documentation/Books/books/Manual/index.html` contains the generated manual
 - - JS-Console - Tools/API - [Interactive swagger documentation](https://arangodb.com/2018/03/using-arangodb-swaggerio-interactive-api-documentation/) which you can play with.
-
-# arangod Example tool
-
-`./utils/generateExamples.sh` picks examples from the code documentation, executes them, and creates a transcript including their results.
-
-Here is how it works in detail:
 - - all *Documentation/DocuBlocks/*.md* and *Documentation/Books/*.md* are searched.
 - - all lines inside of source code starting with '///' are matched, as are all lines in .md files.
- - an example start is marked with *@EXAMPLE_ARANGOSH_OUTPUT* or *@EXAMPLE_ARANGOSH_RUN*
 - - the example is named by the string provided in brackets after the above key
 - - the output is written to `Documentation/Examples/.generated`
 - - if your example depends on a storage engine, prepend `` with `_rocksdb` or `_mmfiles` to run it against such a server
 - - examples end with *@END_EXAMPLE_[OUTPUT|RUN|AQL]*
 - - all code in between is executed as JavaScript in the **arangosh** while talking to a valid **arangod**.
 - - you should strive to group your examples by naming them with a common prefix per topic.
-   You may inspect the generated JS code in `/tmp/arangosh.examples.js`
-
-## OUTPUT, RUN and AQL specifics
-
-By default, examples should be self-contained and thus not depend on each other. They should clean up the collections they create.
-Building will fail if resources aren't cleaned.
-However, if you intend a set of OUTPUT and RUN to demonstrate interactively and share generated *ids*, you have to use an alphabetically
-sortable naming scheme so they're executed in sequence. Using `_[a|b|c|d]_thisDoesThat` seems to be a good scheme.
-
- - EXAMPLE_ARANGOSH_OUTPUT is intended to create samples that the users can cut'n'paste into their arangosh. It's used for JavaScript API documentation.
-   * wrapped lines:
-     Lines starting with a pipe (`/// |`) are joined together with the next following line.
-     You have to use this if you want to execute loops, functions or commands which shouldn't be torn apart by the framework.
-   * Lines starting with *var*:
-     The command behaves similarly to the arangosh: the server reply won't be printed.
-     However, the variable will be in the scope of the other lines - else it won't.
-   * Lines starting with a Tilde (`/// ~`):
-     These lines can be used for setup/teardown purposes. They are completely invisible in the generated example transcript.
-     * `~removeIgnoreCollection("test")` - the collection test may live across several tests.
-     * `~addIgnoreCollection("test")` - the collection test will again trigger an alarm if left over.
-
- - it is executed line by line. If a line is intended to fail (aka throw an exception),
-   you have to specify `// xpError(ERROR_ARANGO_DOCUMENT_KEY_UNEXPECTED)` so the exception will be caught;
-   else the example is marked as broken.
-   If you need to wrap that line, you may want to start the next line with a tilde to suppress an empty line:
-
-       /// | someLongStatement()
-       /// ~ // xpError(ERROR_ARANGO_DOCUMENT_KEY_UNEXPECTED)
-
- - EXAMPLE_ARANGOSH_RUN is intended to be pasted into a Unix shell with *cURL* to demonstrate how the REST-HTTP-APIs work.
-   The whole chunk of code is executed at once, and is expected **not to throw**.
-   You should use **assert(condition)** to ensure the result is what you've expected.
-   The *body* can be a string, or a JavaScript object which is then represented in JSON format.
-
-   * Send the HTTP-request: `var response = logCurlRequest('POST', url, body);`
-   * check its response: `assert(response.code === 200);`
-   * output a JSON server reply: `logJsonResponse(response);` (will fail if it's not valid JSON)
-   * output a JSONL server reply: `logJsonLResponse(response);` (will fail if it's not valid JSON;
-     use it if the server responds with one JSON document per line; add a note to the user that this is `*(One JSON document per line)*` above the example)
# Swagger integration

`./utils/generateSwagger.sh` scans the documentation and generates the swagger output.
It scans for all documentation blocks containing `@RESTHEADER`.
It is a prerequisite for integrating these blocks into the gitbook documentation.

Tokens may have curly brackets with comma-separated fields. They may optionally be followed by subsequent text
lines with a long description.

Sections group a set of tokens; they don't have parameters.

**Types**
Swagger has several native types referenced below:
*[integer|long|float|double|string|byte|binary|boolean|date|dateTime|password]*

[see the swagger documentation](https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#data-types)

A documentation block consists of several sections which in turn have sub-parameters:

**Supported Sections:**

## RESTQUERYPARAMETERS

Parameters to be appended to the URL in the form of `?foo=bar`;
add *RESTQUERYPARAM*s below.

## RESTURLPARAMETERS

Section of parameters placed in the URL in curly brackets; add *RESTURLPARAM*s below.

## RESTDESCRIPTION

Long text describing this route.

## RESTRETURNCODES

Should consist of several *RESTRETURNCODE* tokens.

**Supported Tokens:**

## RESTREPLYBODY

Similar to RESTBODYPARAM - just what the server will reply with.

## RESTHEADER

Up to 3 parameters.
* *[GET|POST|PUT|DELETE] * - the URL should start with a */*; it may contain parameters in curly brackets, which
  have to be documented in subsequent *RESTURLPARAM* tokens in the *RESTURLPARAMETERS* section.
* long description
* operationId - this is a unique string that identifies the source parameter for this REST route. It defaults to a de-spaced `long description` -
  once set, it shouldn't be changed anymore.

## RESTURLPARAM

Consists of 3 values separated by ',':
Attributes:
  - *name*: name of the parameter
  - *type*:
  - *[required|optional]* - "optional" is not supported anymore; split the docublock into one with and one without the parameter.

Followed by a long description.

## RESTALLBODYPARAM

This API has a schemaless JSON body (if in doubt, just plain ASCII).

## RESTBODYPARAM

Attributes:
  - name - the name of the parameter
  - type - the swaggertype of the parameter
  - required/optional - whether the user can omit this parameter
  - subtype / format (can be empty)
    - subtype: if type is object or array, this references the enclosed variables.
- can be either a swaggertype, or a *RESTRUCT* - - format: if type is a native swagger type, some support a format to specify them - -## RESTSTRUCT - -Groups a set of attributes under the `structure name` to an object that can be referenced -in other *RESTSTRUCT*s or *RESTBODYPARAM* attributes of type array or object - -Attributes: - - name - the name of the parameter - - structure name - the **type** under which this structure can be reached (should be uniq!) - - type - the swaggertype of the parameter (or another *RESTSTRUCT*...) - - required/optional - whether the user can omit this parameter - - subtype / format (can be empty) - - subtype: if type is object or array, this references the enclosed variables. - can be either a swaggertype, or a *RESTRUCT* - - format: if type is a native swagger type, some support a format to specify them diff --git a/Documentation/Scripts/codeBlockReader.py b/Documentation/Scripts/codeBlockReader.py index 8c5789743973..ee2365c966dc 100644 --- a/Documentation/Scripts/codeBlockReader.py +++ b/Documentation/Scripts/codeBlockReader.py @@ -96,7 +96,7 @@ def example_content(filepath, fh, tag, blockType, placeIntoFilePath): AQL_STATE_RESULT = 3 aqlState = AQL_STATE_QUERY - blockCount = 0; + blockCount = 0 # read in the context, split into long and short infile = io.open(filepath, 'r', encoding='utf-8', newline=None) diff --git a/Documentation/Scripts/fetchRefs.sh b/Documentation/Scripts/fetchRefs.sh deleted file mode 100755 index 13d85f8990e0..000000000000 --- a/Documentation/Scripts/fetchRefs.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash - -set -e - -ALLBOOKS="HTTP AQL Manual Cookbook Drivers" - -GITAUTH="$1" - -for book in ${ALLBOOKS}; do - - repos=$(grep '^ *$;\1;') - - for oneRepo in ${repos}; do - - REPO=$(echo "$oneRepo" |cut -d ';' -f 1) - CLONEDIR=$(echo "$oneRepo" |cut -d ';' -f 2) - SUBDIR=$(echo "$oneRepo" |cut -d ';' -f 3) - SRC=$(echo "$oneRepo" |cut -d ';' -f 4) - DST=$(echo "$oneRepo" |cut -d ';' -f 5) - - - CODIR="../Books/repos/${CLONEDIR}" - AUTHREPO="${REPO/@/${GITAUTH}@}" - if test -d "${CODIR}"; then - ( - cd "${CODIR}" - git pull --all - ) - else - if test "${GITAUTH}" == "git"; then - AUTHREPO=$(echo "${AUTHREPO}" | sed -e "s;github.com/;github.com:;" -e "s;https://;;" ) - fi - git clone "${AUTHREPO}" "${CODIR}" - fi - - # extract branch/tag/... for checkout from VERSIONS file - branch=$(grep "EXTERNAL_DOC_${CLONEDIR}=" "../../VERSIONS" | sed "s/^EXTERNAL_DOC_${CLONEDIR}=//") - - if [ -z "${branch}" ]; then - echo "no branch for ${CLONEDIR}, specify in VERSIONS file." - exit 1 - fi - - # checkout name from VERSIONS file and pull=merge origin - (cd "${CODIR}" && git checkout "${branch}" && git pull) - - for oneMD in $(cd "${CODIR}/${SUBDIR}"; find "./${SRC}" -type f |sed "s;\./;;"); do - NAME=$(basename "${oneMD}") - MDSUBDIR="${oneMD/${NAME}/}" - DSTDIR="../Books/${book}/${DST}/${MDSUBDIR}" - TOPREF=$(echo "${MDSUBDIR}" | sed 's;\([a-zA-Z]*\)/;../;g') - if test ! 
-d "${DSTDIR}"; then - mkdir -p "${DSTDIR}" - fi - sourcefile="${CODIR}/${SUBDIR}/${SRC}/${oneMD}" - targetfile="${DSTDIR}/${NAME}" - if [[ "$sourcefile" == *.md ]]; then - ( - echo "" - sed "s;https://docs.arangodb.com/latest;../${TOPREF};g" "$sourcefile" - ) > "$targetfile" - else - cp "$sourcefile" "$targetfile" - fi - done - done -done diff --git a/Documentation/Scripts/generateMdFiles.py b/Documentation/Scripts/generateMdFiles.py deleted file mode 100644 index 4d9f82b4e780..000000000000 --- a/Documentation/Scripts/generateMdFiles.py +++ /dev/null @@ -1,840 +0,0 @@ -import sys -import re -import os -import json -import io -import shutil - -RESET = '\033[0m' -def make_std_color(No): - # defined for 1 through 7 - return '\033[3' + No+ 'm' -def make_color(No): - # defined for 1 through 255 - return '\033[38;5;'+ No + 'm' - -WRN_COLOR = make_std_color('3') -ERR_COLOR = make_std_color('1') -STD_COLOR = make_color('8') - -################################################################################ -### @brief length of the swagger definition namespace -################################################################################ - -defLen = len('#/definitions/') - -################################################################################ -### @brief facility to remove leading and trailing html-linebreaks -################################################################################ -removeTrailingBR = re.compile("
$") -removeLeadingBR = re.compile("^
") - -def brTrim(text): - return removeLeadingBR.sub("", removeTrailingBR.sub("", text.strip(' '))) - -swagger = None -fileFilter = None -blockFilter = None -dokuBlocks = [{},{}] -thisVerb = {} -route = '' -verb = '' - -################################################################################ -### Swagger Markdown rendering -################################################################################ -def getReference(name, source, verb): - try: - ref = name['$ref'][defLen:] - except Exception as x: - print >>sys.stderr, ERR_COLOR + "No reference in: " + name + RESET - raise - if not ref in swagger['definitions']: - fn = '' - if verb: - fn = swagger['paths'][route][verb]['x-filename'] - else: - fn = swagger['definitions'][source]['x-filename'] - print >> sys.stderr, STD_COLOR + json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True) + RESET - raise Exception("invalid reference: " + ref + " in " + fn) - return ref - -removeDoubleLF = re.compile("\n\n") -removeLF = re.compile("\n") - -def TrimThisParam(text, indent): - text = text.rstrip('\n').lstrip('\n') - text = removeDoubleLF.sub("\n", text) - if (indent > 0): - indent = (indent + 2) # align the text right of the list... - return removeLF.sub("\n" + ' ' * indent, text) - -def unwrapPostJson(reference, layer): - swaggerDataTypes = ["number", "integer", "string", "boolean", "array", "object"] - #### - # print >>sys.stderr, "xx" * layer + reference - global swagger - rc = '' - if not 'properties' in swagger['definitions'][reference]: - if 'items' in swagger['definitions'][reference]: - if swagger['definitions'][reference]['type'] == 'array': - rc += '[\n' - subStructRef = getReference(swagger['definitions'][reference]['items'], reference, None) - rc += unwrapPostJson(subStructRef, layer + 1) - if swagger['definitions'][reference]['type'] == 'array': - rc += ']\n' - else: - for param in swagger['definitions'][reference]['properties'].keys(): - thisParam = swagger['definitions'][reference]['properties'][param] - required = ('required' in swagger['definitions'][reference] and - param in swagger['definitions'][reference]['required']) - - # print >> sys.stderr, thisParam - if '$ref' in thisParam: - subStructRef = getReference(thisParam, reference, None) - - rc += ' ' * layer + "- **" + param + "**:\n" - #### - # print >>sys.stderr, "yy" * layer + param - rc += unwrapPostJson(subStructRef, layer + 1) - - elif thisParam['type'] == 'object': - rc += ' ' * layer + "- **" + param + "**: " + TrimThisParam(brTrim(thisParam['description']), layer) + "\n" - elif thisParam['type'] == 'array': - rc += ' ' * layer + "- **" + param + "**" - trySubStruct = False - lf="" - #### - # print >>sys.stderr, "zz" * layer + param - if 'type' in thisParam['items']: - rc += " (" + thisParam['items']['type'] + ")" - lf="\n" - else: - if len(thisParam['items']) == 0: - rc += " (anonymous json object)" - lf="\n" - else: - trySubStruct = True - rc += ": " + TrimThisParam(brTrim(thisParam['description']), layer) + lf - if trySubStruct: - try: - subStructRef = getReference(thisParam['items'], reference, None) - except: - print >>sys.stderr, ERR_COLOR + "while analyzing: " + param + RESET - print >>sys.stderr, WRN_COLOR + thisParam + RESET - rc += "\n" + unwrapPostJson(subStructRef, layer + 1) - else: - if thisParam['type'] not in swaggerDataTypes: - print >>sys.stderr, ERR_COLOR + "while analyzing: " + param + RESET - print >>sys.stderr, WRN_COLOR + thisParam['type'] + " is not a valid swagger datatype; supported ones: " + 
str(swaggerDataTypes) + RESET - raise Exception("invalid swagger type") - rc += ' ' * layer + "- **" + param + "**: " + TrimThisParam(thisParam['description'], layer) + '\n' - return rc - -def getRestBodyParam(): - rc = "\n**Body Parameters**\n" - addText = '' - for nParam in range(0, len(thisVerb['parameters'])): - if thisVerb['parameters'][nParam]['in'] == 'body': - descOffset = thisVerb['parameters'][nParam]['x-description-offset'] - addText = '' - if 'additionalProperties' not in thisVerb['parameters'][nParam]['schema']: - addText = unwrapPostJson( - getReference(thisVerb['parameters'][nParam]['schema'], route, verb),0) - rc += addText - return rc - -def getRestDescription(): - #print >>sys.stderr, "RESTDESCRIPTION" - if thisVerb['description']: - description = thisVerb['description'] - #print >> sys.stderr, description - description = RX4[0].sub(RX4[1], description) - return RX3[0].sub(RX3[1], description) - else: - #print >> sys.stderr, "ELSE" - return "" - -def getRestReplyBodyParam(param): - rc = "\n**Response Body**\n" - - try: - rc += unwrapPostJson(getReference(thisVerb['responses'][param]['schema'], route, verb), 0) - except Exception: - print >>sys.stderr, ERR_COLOR + "failed to search " + param + " in: " + RESET - print >>sys.stderr, WRN_COLOR + json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True) + RESET - raise - return rc + "\n" - - -def noValidation(): - pass - -def validatePathParameters(): - # print thisVerb - for nParam in range(0, len(thisVerb['parameters'])): - if thisVerb['parameters'][nParam]['in'] == 'path': - break - else: - raise Exception("@RESTPATHPARAMETERS found in Swagger data without any parameter following in %s " % json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)) - -def validateQueryParams(): - # print thisVerb - for nParam in range(0, len(thisVerb['parameters'])): - if thisVerb['parameters'][nParam]['in'] == 'query': - break - else: - raise Exception("@RESTQUERYPARAMETERS found in Swagger data without any parameter following in %s " % json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)) - -def validateHeaderParams(): - # print thisVerb - for nParam in range(0, len(thisVerb['parameters'])): - if thisVerb['parameters'][nParam]['in'] == 'header': - break - else: - raise Exception("@RESTHEADERPARAMETERS found in Swagger data without any parameter following in %s " % json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)) - -def validateReturnCodes(): - # print thisVerb - for nParam in range(0, len(thisVerb['responses'])): - if len(thisVerb['responses'].keys()) != 0: - break - else: - raise Exception("@RESTRETURNCODES found in Swagger data without any documented returncodes %s " % json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)) - -def validateExamples(): - pass - -SIMPL_REPL_VALIDATE_DICT = { - "@RESTDESCRIPTION" : noValidation, - "@RESTURLPARAMETERS" : validatePathParameters, - "@RESTQUERYPARAMETERS" : validateQueryParams, - "@RESTHEADERPARAMETERS" : validateHeaderParams, - "@RESTRETURNCODES" : validateReturnCodes, - "@RESTURLPARAMS" : validatePathParameters, - "@EXAMPLES" : validateExamples -} -SIMPL_REPL_DICT = { - "\\" : "\\\\", - "@HINTS" : "", - "@RESTDESCRIPTION" : getRestDescription, - "@RESTURLPARAMETERS" : "\n**Path Parameters**\n", - "@RESTQUERYPARAMETERS" : "\n**Query Parameters**\n", - "@RESTHEADERPARAMETERS" : "\n**Header Parameters**\n", - "@RESTRETURNCODES" : "\n**Return Codes**\n", - "@PARAMS" : "\n**Parameters**\n", - "@RESTPARAMS" : "", - 
"@RESTURLPARAMS" : "\n**Path Parameters**\n", - "@RESTQUERYPARAMS" : "\n**Query Parameters**\n", - "@RESTBODYPARAM" : "", #getRestBodyParam, - "@RESTREPLYBODY" : getRestReplyBodyParam, - "@RESTQUERYPARAM" : "@RESTPARAM", - "@RESTURLPARAM" : "@RESTPARAM", - "@PARAM" : "@RESTPARAM", - "@RESTHEADERPARAM" : "@RESTPARAM", - "@EXAMPLES" : "\n**Examples**\n", - "@RESTPARAMETERS" : "" -} -SIMPLE_RX = re.compile( -r''' -\\| # the backslash... -@RESTDESCRIPTION| # -> -@HINTS| # -> -@RESTURLPARAMETERS| # -> \n**Path Parameters**\n -@RESTQUERYPARAMETERS| # -> \n**Query Parameters**\n -@RESTHEADERPARAMETERS| # -> \n**Header Parameters**\n -@RESTBODYPARAM| # empty now, comes with the post body -> call post body param -@RESTRETURNCODES| # -> \n**Return Codes**\n -@PARAMS| # -> \n**Parameters**\n -@RESTPARAMS| # -> -@RESTURLPARAMS| # -> -@RESTQUERYPARAMS| # -> -@PARAM| # -> @RESTPARAM -@RESTURLPARAM| # -> @RESTPARAM -@RESTQUERYPARAM| # -> @RESTPARAM -@RESTHEADERPARAM| # -> @RESTPARAM -@EXAMPLES| # -> \n**Examples**\n -@RESTPARAMETERS| # -> -@RESTREPLYBODY\{(.*)\} # -> call body function -''', re.X) - - -def SimpleRepl(match): - m = match.group(0) - # print 'xxxxx [%s]' % m - n = None - try: - n = SIMPL_REPL_VALIDATE_DICT[m] - except: - True - if n != None: - n() - try: - n = SIMPL_REPL_DICT[m] - if n == None: - raise Exception("failed to find regex while searching for: " + m) - else: - if type(n) == type(''): - return n - else: - return n() - except Exception: - pos = m.find('{') - if pos > 0: - newMatch = m[:pos] - param = m[pos + 1 :].rstrip(' }') - try: - n = SIMPL_REPL_DICT[newMatch] - if n == None: - raise Exception("failed to find regex while searching for: " + - newMatch + " extracted from: " + m) - else: - if type(n) == type(''): - return n - else: - return n(param) - except Exception as x: - #raise Exception("failed to find regex while searching for: " + - # newMatch + " extracted from: " + m) - raise - else: - raise Exception("failed to find regex while searching for: " + m) - -RX = [ - (re.compile(r""), ""), - # remove the placeholder BR's again - (re.compile(r"
\n"), "\n"), - # multi line bullet lists should become one - (re.compile(r"\n\n-"), "\n-"), - - #HTTP API changing code - # unwrap multi-line-briefs: (up to 3 lines supported by now ;-) - (re.compile(r"@brief(.+)\n(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2> \g<3>\n\n"), - (re.compile(r"@brief(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2>\n\n"), - # if there is an @brief above a RESTHEADER, swap the sequence - (re.compile(r"@brief(.+\n*)\n@RESTHEADER{([#\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<3>\n\g<1>\n\n`\g<2>`"), - # else simply put it into the text - (re.compile(r"@brief(.+)"), r"\g<1>"), - # there should be no RESTHEADER without brief, so we will fail offensively if by not doing - #(re.compile(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<2>\n`\g<1>`"), - - # Format error codes from errors.dat - (re.compile(r"#####+\n"), r""), - (re.compile(r"## (.+\n\n)## (.+\n)"), r"## \g<1>\g<2>"), - # (re.compile(r"- (\w+):\s*@LIT{(.+)}"), r"\n*\g<1>* - **\g<2>**:"), - (re.compile(r"(.+),(\d+),\"(.+)\",\"(.+)\""), r'\n*
**\g<2>** - **\g<1>**
\n \g<4>'), - - (re.compile(r"TODOSWAGGER.*"),r"") - ] - - -# (re.compile(r"@RESTPARAM{([\s\w-]*),([\s\w\_\|-]*),\s*(\w+)}"), r"* *\g<1>*:"), -# (re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:"), -# (re.compile(r"@RESTBODYPARAMS{(.*)}"), r"*(\g<1>)*"), - -RX2 = [ - # parameters - extract their type and whether mandatory or not. - (re.compile(r"@RESTPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"* *\g<1>* (\g<3>):"), - (re.compile(r"@RESTALLBODYPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"\n**Request Body** (\g<3>)\n\n"), - - (re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:") -] - -RX3 = (re.compile(r'\*\*Example:\*\*((?:.|\n)*?)'), r"") - -RX4 = (re.compile(r'.*', re.DOTALL), r"") - -match_RESTHEADER = re.compile(r"@RESTHEADER\{(.*)\}") -match_RESTRETURNCODE = re.compile(r"@RESTRETURNCODE\{(.*)\}") -have_RESTBODYPARAM = re.compile(r"@RESTBODYPARAM|@RESTDESCRIPTION") -have_RESTREPLYBODY = re.compile(r"@RESTREPLYBODY") -have_RESTSTRUCT = re.compile(r"@RESTSTRUCT") -remove_MULTICR = re.compile(r'\n\n\n*') - -RXIMAGES = re.compile(r".*\!\[([\d\s\w\/\. ()-]*)\]\(([\d\s\w\/\.-]*)\).*") - -def _mkdir_recursive(path): - sub_path = os.path.dirname(path) - if not os.path.exists(sub_path): - _mkdir_recursive(sub_path) - if not os.path.exists(path): - os.mkdir(path) - - -def replaceCode(lines, blockName): - global swagger, thisVerb, route, verb - thisVerb = {} - foundRest = False - # first find the header: - headerMatch = match_RESTHEADER.search(lines) - if headerMatch and headerMatch.lastindex > 0: - foundRest = True - try: - (verb,route) = headerMatch.group(1).split(',')[0].split(' ') - verb = verb.lower() - except: - print >> sys.stderr, ERR_COLOR + "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName + RESET - raise - - try: - thisVerb = swagger['paths'][route][verb] - except: - print >> sys.stderr, ERR_COLOR + "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName + RESET - print >> sys.stderr, WRN_COLOR + lines + RESET - print >> sys.stderr, "Did you forget to run utils/generateSwagger.sh?" 
- raise - - for (oneRX, repl) in RX: - lines = oneRX.sub(repl, lines) - - - if foundRest: - rcCode = None - foundRestBodyParam = False - foundRestReplyBodyParam = False - lineR = lines.split('\n') - #print lineR - l = len(lineR) - r = 0 - while (r < l): - # remove all but the first RESTBODYPARAM: - if have_RESTBODYPARAM.search(lineR[r]): - if foundRestBodyParam: - lineR[r] = '' - else: - lineR[r] = '@RESTDESCRIPTION' - foundRestBodyParam = True - r+=1 - while ((len(lineR[r]) == 0) or - ((lineR[r][0] != '@') or - have_RESTBODYPARAM.search(lineR[r]))): - # print "xxx - %d %s" %(len(lineR[r]), lineR[r]) - lineR[r] = '' - r+=1 - - m = match_RESTRETURNCODE.search(lineR[r]) - if m and m.lastindex > 0: - rcCode = m.group(1) - - # remove all but the first RESTREPLYBODY: - if have_RESTREPLYBODY.search(lineR[r]): - if foundRestReplyBodyParam != rcCode: - lineR[r] = '@RESTREPLYBODY{' + rcCode + '}\n' - else: - lineR[r] = '' - foundRestReplyBodyParam = rcCode - r+=1 - while (len(lineR[r]) > 1): - lineR[r] = '' - r+=1 - m = match_RESTRETURNCODE.search(lineR[r]) - if m and m.lastindex > 0: - rcCode = m.group(1) - - # remove all RESTSTRUCTS - they're referenced anyways: - if have_RESTSTRUCT.search(lineR[r]): - while (len(lineR[r]) > 1): - lineR[r] = '' - r+=1 - r+=1 - lines = "\n".join(lineR) - #print "x" * 70 - #print lines - try: - lines = SIMPLE_RX.sub(SimpleRepl, lines) - except Exception as x: - print >> sys.stderr, ERR_COLOR + "While working on: [" + verb + " " + route + "]" + " while analysing " + blockName + RESET - print >> sys.stderr, WRN_COLOR + x.message + RESET - print >> sys.stderr, "Did you forget to run utils/generateSwagger.sh?" - raise - - - for (oneRX, repl) in RX2: - lines = oneRX.sub(repl, lines) - - lines = remove_MULTICR.sub("\n\n", lines) - #print lines - return lines - -def replaceCodeIndex(lines): - lines = re.sub(r"","", lines) - #HTTP API changing code - #lines = re.sub(r"@brief(.+)",r"\g<1>", lines) - #lines = re.sub(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}", r"###\g<2>\n`\g<1>`", lines) - return lines - -RXUnEscapeMDInLinks = re.compile("\\\\_") -def setAnchor(param): - unescapedParam = RXUnEscapeMDInLinks.sub("_", param) - return "#" - -RXFinal = [ - (re.compile(r"@anchor (.*)"), setAnchor), -] -def replaceCodeFullFile(lines): - for (oneRX, repl) in RXFinal: - lines = oneRX.sub(repl, lines) - return lines - -################################################################################ -# main loop over all files -################################################################################ -def walk_on_files(inDirPath, outDirPath): - global fileFilter - count = 0 - skipped = 0 - for root, dirs, files in os.walk(inDirPath): - for file in files: - if file.endswith(".md") and not file.endswith("SUMMARY.md"): - count += 1 - nextInFileFull = os.path.join(root, file) - nextOutFileFull = os.path.join(outDirPath, nextInFileFull) - if fileFilter != None: - if fileFilter.match(nextInFileFull) == None: - skipped += 1 - # print "Skipping %s -> %s" % (inFileFull, outFileFull) - continue; - #print "%s -> %s" % (nextInFileFull, nextOutFileFull) - _mkdir_recursive(os.path.join(outDirPath, root)) - findStartCode(nextInFileFull, nextOutFileFull, inDirPath) - print STD_COLOR + "Processed %d files, skipped %d" % (count, skipped) + RESET - -def findStartCode(inFileFull, outFileFull, baseInPath): - inFD = io.open(inFileFull, "r", encoding="utf-8", newline=None) - textFile = inFD.read() - inFD.close() - #print "-" * 80 - #print textFile - matchInline = 
re.findall(r'@startDocuBlockInline\s*(\w+)', textFile) - if matchInline: - for find in matchInline: - #print "7"*80 - #print inFileFull + " " + find - textFile = replaceTextInline(textFile, inFileFull, find) - #print textFile - - match = re.findall(r'@startDocuBlock\s*(\w+)', textFile) - if match: - for find in match: - #print "8"*80 - #print find - textFile = replaceText(textFile, inFileFull, find) - #print textFile - - try: - textFile = replaceCodeFullFile(textFile) - except: - print >>sys.stderr, ERR_COLOR + "while parsing : " + inFileFull + RESET - raise - #print "9" * 80 - #print textFile - - def analyzeImages(m): - imageLink = m.groups()[1] - inf = os.path.realpath(os.path.join(os.path.dirname(inFileFull), imageLink)) - outf = os.path.realpath(os.path.join(os.path.dirname(outFileFull), imageLink)) - bookDir = os.path.realpath(baseInPath) - depth = len(inFileFull.split(os.sep)) - 1 # filename + book directory - assets = os.path.join((".." + os.sep)*depth, baseInPath, "assets") - # print(inf, outf, bookDir, depth, assets) - - outdir = os.path.dirname(outf) - if not os.path.exists(outdir): - _mkdir_recursive(outdir) - if os.path.commonprefix([inf, bookDir]) != bookDir: - assetDir = os.path.join(outdir, assets) - if not os.path.exists(assetDir): - os.mkdir(assetDir) - outf=os.path.join(assetDir, os.path.basename(imageLink)) - imageLink = os.path.join((".." + os.sep)* (depth - 1), "assets",os.path.basename(imageLink)) - - if not os.path.exists(outf): - shutil.copy(inf, outf) - return str('![' + m.groups()[0] + '](' + imageLink + ')') - - textFile = re.sub(RXIMAGES,analyzeImages, textFile) - outFD = io.open(outFileFull, "w", encoding="utf-8", newline="") - outFD.write(textFile) - outFD.close() -#JSF_put_api_replication_synchronize - -def replaceText(text, pathOfFile, searchText): - ''' inserts docublocks into md ''' - #print '7'*80 - global dokuBlocks - if not searchText in dokuBlocks[0]: - print >> sys.stderr, ERR_COLOR + "Failed to locate the docublock '" + searchText + "' for replacing it into the file '" +pathOfFile + "'\n have:" + RESET - print >> sys.stderr, WRN_COLOR + dokuBlocks[0].keys() + RESET - print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET - print >> sys.stderr, WRN_COLOR + text + RESET - print >> sys.stderr, ERR_COLOR + "Failed to locate the docublock '" + searchText + "' for replacing it into the file '" +pathOfFile + "' For details scroll up!" + RESET - exit(1) - #print '7'*80 - #print dokuBlocks[0][searchText] - #print '7'*80 - rc= re.sub("@startDocuBlock\s+"+ searchText + "(?:\s+|$)", dokuBlocks[0][searchText], text) - return rc - -def replaceTextInline(text, pathOfFile, searchText): - ''' inserts docublocks into md ''' - global dokuBlocks - if not searchText in dokuBlocks[1]: - print >> sys.stderr, ERR_COLOR + "Failed to locate the inline docublock '" + searchText + "' for replacing it into the file '" + pathOfFile + "'\n have: " + RESET - print >> sys.stderr, "%s%s%s" %(WRN_COLOR, dokuBlocks[1].keys(), RESET) - print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET - print >> sys.stderr, WRN_COLOR + text + RESET - print >> sys.stderr, ERR_COLOR + "Failed to locate the inline docublock '" + searchText + "' for replacing it into the file '" + pathOfFile + "' For details scroll up!" 
+ RESET - exit(1) - rePattern = r'(?s)\s*@startDocuBlockInline\s+'+ searchText +'\s.*?@endDocuBlock\s' + searchText - # (?s) is equivalent to flags=re.DOTALL but works in Python 2.6 - match = re.search(rePattern, text) - - if (match == None): - print >> sys.stderr, ERR_COLOR + "failed to match with '" + rePattern + "' for " + searchText + " in file " + pathOfFile + " in: \n" + text + RESET - exit(1) - - subtext = match.group(0) - if (len(re.findall('@startDocuBlock', subtext)) > 1): - print >> sys.stderr, ERR_COLOR + "failed to snap with '" + rePattern + "' on end docublock for " + searchText + " in " + pathOfFile + " our match is:\n" + subtext + RESET - exit(1) - - return re.sub(rePattern, dokuBlocks[1][searchText], text) - -################################################################################ -# Read the docublocks into memory -################################################################################ -thisBlock = "" -thisBlockName = "" -thisBlockType = 0 - -STATE_SEARCH_START = 0 -STATE_SEARCH_END = 1 -SEARCH_START = re.compile(r" *start[0-9a-zA-Z]*\s\s*([0-9a-zA-Z_ ]*)\s*$") - - -def readStartLine(line): - global thisBlockName, thisBlockType, thisBlock, dokuBlocks - if ("@startDocuBlock" in line): - if "@startDocuBlockInline" in line: - thisBlockType = 1 - else: - thisBlockType = 0 - try: - thisBlockName = SEARCH_START.search(line).group(1).strip() - except: - print >> sys.stderr, ERR_COLOR + "failed to read startDocuBlock: [" + line + "]" + RESET - exit(1) - dokuBlocks[thisBlockType][thisBlockName] = "" - return STATE_SEARCH_END - return STATE_SEARCH_START - -def readNextLine(line): - global thisBlockName, thisBlockType, thisBlock, dokuBlocks - if '@endDocuBlock' in line: - return STATE_SEARCH_START - dokuBlocks[thisBlockType][thisBlockName] += line - #print "reading " + thisBlockName - #print dokuBlocks[thisBlockType][thisBlockName] - return STATE_SEARCH_END - -def loadDokuBlocks(): - state = STATE_SEARCH_START - f = io.open("allComments.txt", "r", encoding="utf-8", newline=None) - count = 0 - for line in f.readlines(): - if state == STATE_SEARCH_START: - state = readStartLine(line) - elif state == STATE_SEARCH_END: - state = readNextLine(line) - - #if state == STATE_SEARCH_START: - # print dokuBlocks[thisBlockType].keys() - - if blockFilter != None: - remainBlocks= {} - print STD_COLOR + "filtering blocks" + RESET - for oneBlock in dokuBlocks[0]: - if blockFilter.match(oneBlock) != None: - print "%sfound block %s%s" % (STD_COLOR, oneBlock, RESET) - #print dokuBlocks[0][oneBlock] - remainBlocks[oneBlock] = dokuBlocks[0][oneBlock] - dokuBlocks[0] = remainBlocks - - for oneBlock in dokuBlocks[0]: - try: - #print "processing %s" % oneBlock - dokuBlocks[0][oneBlock] = replaceCode(dokuBlocks[0][oneBlock], oneBlock) - #print "6"*80 - #print dokuBlocks[0][oneBlock] - #print "6"*80 - except: - print >>sys.stderr, ERR_COLOR + "while parsing :\n" + oneBlock + RESET - raise - - for oneBlock in dokuBlocks[1]: - try: - dokuBlocks[1][oneBlock] = replaceCode(dokuBlocks[1][oneBlock], oneBlock) - except: - print >>sys.stderr, WRN_COLOR + "while parsing :\n" + oneBlock + RESET - raise - -def loadProgramOptionBlocks(): - from itertools import groupby, chain - from cgi import escape - from glob import glob - - global dokuBlocks - - # Allows to test if a group will be empty with obsolete options ignored - def peekIterator(iterable, condition): - try: - while True: - first = next(iterable) - if condition(first): - break - except StopIteration: - return None - return first, chain([first], 
iterable) - - # Give options a the section name 'global' if they don't have one - def groupBySection(elem): - return elem[1]["section"] or 'global' - - # Empty section string means global option, which should appear first - def sortBySection(elem): - section = elem[1]["section"] - if section: - return (1, section) - return (0, u'global') - - # Format possible values as unordered list - def formatList(arr, text=''): - formatItem = lambda elem: '
  • {}
  • '.format(elem) - return '{}
      {}
    \n'.format(text, '\n'.join(map(formatItem, arr))) - - for programOptionsDump in glob(os.path.normpath('../Examples/*.json')): - - program = os.path.splitext(os.path.basename(programOptionsDump))[0] - output = [] - - # Load program options dump and convert to Python object - with io.open(programOptionsDump, 'r', encoding='utf-8', newline=None) as fp: - try: - optionsRaw = json.load(fp) - except ValueError as err: - # invalid JSON - print >>sys.stderr, ERR_COLOR + "Failed to parse program options json: '" + programOptionsDump + "' - to be used as: '" + program + "' - " + err.message + RESET - raise err - - # Group and sort by section name, global section first - for groupName, group in groupby( - sorted(optionsRaw.items(), key=sortBySection), - key=groupBySection): - - # Use some trickery to skip obsolete options without consuming items from iterator - groupPeek = peekIterator(group, lambda elem: elem[1].setdefault("obsolete", False) is False) - if groupPeek is None: - # Skip empty section to avoid useless headline (all options are obsolete) - continue - - # Output table header with column labels (one table per section) - output.append('\n

    {} Options

    '.format(groupName.title())) - if program in ['arangod']: - output.append('\nAlso see {0} details.'.format(groupName.title())) - output.append('\n') - output.append(''.format('Name', 'Type', 'Description')) - output.append('') - - # Sort options by name and output table rows - for optionName, option in sorted(groupPeek[1], key=lambda elem: elem[0]): - - # Skip options marked as obsolete, eventhough they are not dumped at the moment - if option.setdefault("obsolete", False): - continue - - # Recover JSON syntax, because the Python representation uses [u'this format'] - default = json.dumps(option["default"]) - - # Whether the default value depends on the target host capabilities or configuration - dynamic = option.setdefault("dynamic", False) - - if dynamic: - defaultDynamic = '
    Default: dynamic (e.g. {})'.format(default) - else: - defaultDynamic = '
    Default: {}'.format(default) - - # Parse and re-format the optional field for possible values - # (not fully safe, but ', ' is unlikely to occur in strings) - try: - optionList = option["values"].partition('Possible values: ')[2].split(', ') - values = formatList(optionList, '
    Possible values:\n') - except KeyError: - values = '' - - # Expected data type for argument - valueType = option["type"] - - # Enterprise Edition has EE only options marked - enterprise = "" - if option.setdefault("enterpriseOnly", False): - enterprise = "Enterprise Edition only
    " - - # Beside option there are also flag-like commands (like --version) - isCommand = "" - category = option.setdefault("category", "option") - if category == "command": - isCommand = '
    This is a command, no value needs to be specified. The process terminates after executing the command.' - - # Some Boolean options can be used like flags (also true for commands) - isFlag = "" - requiresValue = option.setdefault("requiresValue", True) - if not requiresValue and category != "command": - isFlag = '
    This option can be specified without value to enable it.' - - # Versions since the option is available or when it was marked as deprecated - versionInfo = "" - introducedIn = option.setdefault("introducedIn", None) - deprecatedIn = option.setdefault("deprecatedIn", None) - if introducedIn: - versionInfo += '
    Introduced in: {}'.format(", ".join(introducedIn)) - if deprecatedIn: - versionInfo += '
    Deprecated in: {}'.format(", ".join(deprecatedIn)) - - # Upper-case first letter, period at the end, HTML entities - description = option["description"].strip() - description = description[0].upper() + description[1:] - if description[-1] != '.': - description += '.' - description = escape(description) - - # Description, default value and possible values separated by line breaks - descriptionCombined = '\n'.join([enterprise, description, isFlag, isCommand, defaultDynamic, values, versionInfo]) - - output.append(''.format(optionName, valueType, descriptionCombined)) - - output.append('
    {}{}{}
    {}{}{}
    ') - - # Join output and register as docublock (like 'program_options_arangosh') - dokuBlocks[0]['program_options_' + program.lower()] = '\n'.join(output) + '\n\n' - -if __name__ == '__main__': - if len(sys.argv) < 2: - print("usage: input-directory output-directory swaggerJson [filter]") - exit(1) - inDir = sys.argv[1] - outDir = sys.argv[2] - swaggerJson = sys.argv[3] - if len(sys.argv) > 4 and sys.argv[4].strip() != '': - print STD_COLOR + "filtering " + sys.argv[4] + RESET - fileFilter = re.compile(sys.argv[4]) - if len(sys.argv) > 5 and sys.argv[5].strip() != '': - print STD_COLOR + "filtering Docublocks: " + sys.argv[5] + RESET - blockFilter = re.compile(sys.argv[5]) - f = io.open(swaggerJson, 'r', encoding='utf-8', newline=None) - swagger= json.load(f) - f.close() - loadDokuBlocks() - loadProgramOptionBlocks() - print "%sloaded %d / %d docu blocks%s" % (STD_COLOR, len(dokuBlocks[0]), len(dokuBlocks[1]), RESET) - #print dokuBlocks[0].keys() - walk_on_files(inDir, outDir) diff --git a/Installation/ARM/postinst b/Installation/ARM/postinst index fd9c58abcbf5..47468273adc6 100755 --- a/Installation/ARM/postinst +++ b/Installation/ARM/postinst @@ -11,7 +11,7 @@ First Steps with ArangoDB: http:/www.arangodb.com/quickstart Upgrading ArangoDB: - https://docs.arangodb.com/Manual/Administration/Upgrading/ + https://www.arangodb.com/docs/stable/upgrading.html Upgrading ArangoDB database files: > /etc/init.d/arangodb3 upgrade diff --git a/Installation/rpm/arangodb.spec.in b/Installation/rpm/arangodb.spec.in index 0d9e95ab391f..55ed67ca7128 100644 --- a/Installation/rpm/arangodb.spec.in +++ b/Installation/rpm/arangodb.spec.in @@ -227,13 +227,13 @@ ArangoDB 3 (https://www.arangodb.com) or JavaScript extensions. First Steps with ArangoDB: - https://docs.arangodb.com/latest/Manual/GettingStarted/ + https://www.arangodb.com/docs/stable/getting-started.html Upgrading ArangoDB: - https://docs.arangodb.com/Installing/Upgrading.html + https://www.arangodb.com/docs/stable/upgrading.html Configuring the storage Engine: - https://docs.arangodb.com/latest/Manual/Administration/Configuration/GeneralArangod.html#storage-engine + https://www.arangodb.com/docs/stable/programs-arangod-server.html#storage-engine Upgrading ArangoDB database files: > /etc/init.d/arangodb3 upgrade diff --git a/README.md b/README.md index 66e2928ebc67..b439b8d70d9e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![ArangoDB-Logo](https://docs.arangodb.com/assets/arangodb_logo_2016_inverted.png) +![ArangoDB-Logo](https://www.arangodb.com/docs/assets/arangodb_logo_2016_inverted.png) ArangoDB ======== @@ -14,7 +14,7 @@ The supported data models can be mixed in queries and allow ArangoDB to be the aggregation point for your data. To get started, try one of our 10 minutes [tutorials](https://www.arangodb.com/tutorials) -in your favorite programming language or try one of our [ArangoDB Cookbook recipes](https://docs.arangodb.com/cookbook). +in your favorite programming language or try one of our [ArangoDB Cookbook recipes](https://www.arangodb.com/docs/stable/cookbook/). For the impatient: [download](https://www.arangodb.com/download) and install ArangoDB. Start the server `arangod` and point your browser to `http://127.0.0.1:8529/`. 
@@ -31,7 +31,7 @@ Key Features in ArangoDB Here is an AQL query that makes use of all those features: -![AQL Query Example](https://docs.arangodb.com/assets/aql_query_with_traversal.png) +![AQL Query Example](https://www.arangodb.com/docs/assets/aql_query_with_traversal.png) Joins and transactions are key features for flexible, secure data designs, widely used in relational databases but lacking in many NoSQL products. However, @@ -49,7 +49,7 @@ Microservice Example By extending the HTTP API with user code written in JavaScript, ArangoDB can be turned into a strict schema-enforcing persistence engine. -Next step, bundle your Foxx application as a [docker container](https://docs.arangodb.com/cookbook/Cloud/NodeJsDocker.html) +Next step, bundle your Foxx application as a [docker container](https://www.arangodb.com/docs/stable/cookbook/cloud-node-js-docker.html) and get it running in the cloud. Other features of ArangoDB include: @@ -85,16 +85,16 @@ Latest Release Packages for all supported platforms can be downloaded from [https://www.arangodb.com/download](https://www.arangodb.com/download/). -Please also check [what's new in ArangoDB](https://docs.arangodb.com/latest/Manual/ReleaseNotes/). +Please also check [what's new in ArangoDB](https://www.arangodb.com/docs/stable/release-notes.html). More Information ---------------- -Please check the [Installation Manual](https://docs.arangodb.com/latest/Manual/GettingStarted/Installing/) +Please check the [Installation Manual](https://www.arangodb.com/docs/stable/installation.html) for installation and compilation instructions. -The [User Manual](https://docs.arangodb.com/latest/Manual/GettingStarted/) has an +The [User Manual](https://www.arangodb.com/docs/stable/getting-started.html) has an introductory chapter showing the basic operations of ArangoDB. 
diff --git a/VERSIONS b/VERSIONS index f5468821b38f..7cf5cd57b6f1 100644 --- a/VERSIONS +++ b/VERSIONS @@ -1,21 +1,3 @@ STARTER_REV "0.14.5" SYNCER_REV "0.6.4" GCC_LINUX "8.3.0" -GSEARCH_ID_HTTP "010085642145132923492:fixi4yzeiz8" -GSEARCH_ID_AQL "010085642145132923492:6ymjhhr677k" -GSEARCH_ID_Manual "010085642145132923492:djexw6vlsgo" -GSEARCH_ID_Drivers "010085642145132923492:j_mijzclede" -GSEARCH_ID_Cookbook "010085642145132923492:nknyifjn7tu" -GCHANGE_FREQ "daily" -GPRIORITY "0.3" -BROWSEABLE_VERSIONS "'devel', '3.4', '3.3', '3.2', '3.1', '3.0', '2.8'" -EXTERNAL_DOC_arangodb-java-driver=master -EXTERNAL_DOC_arangojs=master -EXTERNAL_DOC_arangosync=master -EXTERNAL_DOC_arangodb=master -EXTERNAL_DOC_spring-data=master -EXTERNAL_DOC_kube-arangodb=master -EXTERNAL_DOC_foxx-cli=master -EXTERNAL_DOC_arangodb-php=devel -EXTERNAL_DOC_go-driver=master -EXTERNAL_DOC_arangodb-spark-connector=master diff --git a/arangod/Aql/EngineInfoContainerDBServer.cpp b/arangod/Aql/EngineInfoContainerDBServer.cpp index fa5137816c7a..63eb1aa5127f 100644 --- a/arangod/Aql/EngineInfoContainerDBServer.cpp +++ b/arangod/Aql/EngineInfoContainerDBServer.cpp @@ -171,8 +171,7 @@ void EngineInfoContainerDBServer::EngineInfo::addNode(ExecutionNode* node) { TRI_ASSERT(sourceImpl); if (node->isRestricted()) { - TRI_ASSERT(sourceImpl->restrictedShard.empty()); - sourceImpl->restrictedShard = node->restrictedShard(); + sourceImpl->restrictedShards.emplace(node->restrictedShard()); } }; @@ -315,10 +314,10 @@ void EngineInfoContainerDBServer::EngineInfo::serializeSnippet( bool isResponsibleForInitializeCursor) const { auto* collection = boost::get(&_source); TRI_ASSERT(collection); - auto& restrictedShard = collection->restrictedShard; + auto const& restrictedShards = collection->restrictedShards; - if (!restrictedShard.empty()) { - if (id != restrictedShard) { + if (!restrictedShards.empty()) { + if (restrictedShards.find(id) == restrictedShards.end()) { return; } // We only have one shard it has to be responsible! 
diff --git a/arangod/Aql/EngineInfoContainerDBServer.h b/arangod/Aql/EngineInfoContainerDBServer.h index 5b3ae8b59bf5..a28fa444ff3e 100644 --- a/arangod/Aql/EngineInfoContainerDBServer.h +++ b/arangod/Aql/EngineInfoContainerDBServer.h @@ -88,7 +88,6 @@ class EngineInfoContainerDBServer { explicit EngineInfo(size_t idOfRemoteNode) noexcept; EngineInfo(EngineInfo&& other) noexcept; ~EngineInfo(); - EngineInfo(EngineInfo&) = delete; EngineInfo(EngineInfo const& other) = delete; #if (_MSC_VER != 0) @@ -124,14 +123,11 @@ class EngineInfoContainerDBServer { private: struct CollectionSource { - explicit CollectionSource(aql::Collection* collection) noexcept - : collection(collection) { - } - CollectionSource(CollectionSource&&) = default; - CollectionSource& operator=(CollectionSource&&) = default; + explicit CollectionSource(aql::Collection* collection) + : collection(collection) {} aql::Collection* collection{}; // The collection used to connect to this engine - std::string restrictedShard; // The shard this snippet is restricted to + std::unordered_set restrictedShards{}; // The shards this snippet is restricted to }; struct ViewSource { diff --git a/arangod/IResearch/IResearchAnalyzerFeature.cpp b/arangod/IResearch/IResearchAnalyzerFeature.cpp index 19e788f76b58..d8f1679c9b46 100644 --- a/arangod/IResearch/IResearchAnalyzerFeature.cpp +++ b/arangod/IResearch/IResearchAnalyzerFeature.cpp @@ -28,7 +28,9 @@ #undef NOEXCEPT #endif +#include #include "analysis/analyzers.hpp" +#include "analysis/token_streams.hpp" #include "analysis/token_attributes.hpp" #include "analysis/text_token_stream.hpp" #include "analysis/delimited_token_stream.hpp" @@ -383,97 +385,174 @@ namespace norm_vpack { arangodb::aql::AqlValue aqlFnTokens(arangodb::aql::ExpressionContext* expressionContext, arangodb::transaction::Methods* trx, arangodb::aql::VPackFunctionParameters const& args) { - if (2 != args.size() || !args[0].isString() || !args[1].isString()) { - irs::string_ref const message = "invalid arguments while computing result for function 'TOKENS'"; - + + if (ADB_UNLIKELY(args.empty() || args.size() > 2)) { + irs::string_ref const message = + "invalid arguments count while computing result for function 'TOKENS'"; LOG_TOPIC("740fd", WARN, arangodb::iresearch::TOPIC) << message; - THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, message); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH, message); } - auto data = arangodb::iresearch::getStringRef(args[0].slice()); - auto name = arangodb::iresearch::getStringRef(args[1].slice()); - auto* analyzers = - arangodb::application_features::ApplicationServer::getFeature(); - - TRI_ASSERT(analyzers); - + if (args.size() > 1 && !args[1].isString()) { // second arg must be analyzer name + irs::string_ref const message = + "invalid analyzer name argument type while computing result for function 'TOKENS'," + " string expected"; + LOG_TOPIC("d0b60", WARN, arangodb::iresearch::TOPIC) << message; + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, message); + } + arangodb::iresearch::IResearchAnalyzerFeature::AnalyzerPool::ptr pool; - - if (trx) { - auto* sysDatabase = arangodb::application_features::ApplicationServer::lookupFeature< // find feature - arangodb::SystemDatabaseFeature // featue type - >(); - - auto sysVocbase = sysDatabase ? sysDatabase->use() : nullptr; - - if (sysVocbase) { - pool = analyzers->get(name, trx->vocbase(), *sysVocbase); + // identity now is default analyzer + auto const name = args.size() > 1 ? 
+ arangodb::iresearch::getStringRef(args[1].slice()) : + iresearch::string_ref(arangodb::iresearch::IResearchAnalyzerFeature::identity()->name()); + + if( args.size() > 1) { + auto* analyzers = + arangodb::application_features::ApplicationServer::getFeature(); + TRI_ASSERT(analyzers); + if (trx) { + auto* sysDatabase = arangodb::application_features::ApplicationServer::lookupFeature< // find feature + arangodb::SystemDatabaseFeature>(); // featue type + auto sysVocbase = sysDatabase ? sysDatabase->use() : nullptr; + if (sysVocbase) { + pool = analyzers->get(name, trx->vocbase(), *sysVocbase); + } + } else { + pool = analyzers->get(name); // verbatim } - } else { - pool = analyzers->get(name); // verbatim + } else { //do not look for identity, we already have reference) + pool = arangodb::iresearch::IResearchAnalyzerFeature::identity(); } if (!pool) { - auto const message = "failure to find arangosearch analyzer with name '"s - + static_cast(name) - + "' while computing result for function 'TOKENS'"; + auto const message = "failure to find arangosearch analyzer with name '"s + + static_cast(name) + + "' while computing result for function 'TOKENS'"; LOG_TOPIC("0d256", WARN, arangodb::iresearch::TOPIC) << message; THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, message); } - auto analyzer = pool->get(); + auto string_analyzer = pool->get(); - if (!analyzer) { - auto const message = "failure to find arangosearch analyzer with name '"s - + static_cast(name) - + "' while computing result for function 'TOKENS'"; + if (!string_analyzer) { + auto const message = "failure to get arangosearch analyzer with name '"s + + static_cast(name) + + "' while computing result for function 'TOKENS'"; LOG_TOPIC("d7477", WARN, arangodb::iresearch::TOPIC) << message; THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, message); } - if (!analyzer->reset(data)) { - auto const message = "failure to reset arangosearch analyzer: ' "s - + static_cast(name) - + "' while computing result for function 'TOKENS'"; - - LOG_TOPIC("45a2d", WARN, arangodb::iresearch::TOPIC) << message; - THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message); - } - - auto& values = analyzer->attributes().get(); + auto& string_terms = string_analyzer->attributes().get(); - if (!values) { + if (ADB_UNLIKELY(!string_terms)) { auto const message = - "failure to retrieve values from arangosearch analyzer name '"s - + static_cast(name) - + "' while computing result for function 'TOKENS'"; + "failure to retrieve values from arangosearch analyzer name '"s + + static_cast(name) + + "' while computing result for function 'TOKENS'"; LOG_TOPIC("f46f2", WARN, arangodb::iresearch::TOPIC) << message; THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message); } + std::unique_ptr numeric_analyzer; + const irs::term_attribute* numeric_terms = nullptr; + // to avoid copying Builder's default buffer when initializing AqlValue // create the buffer externally and pass ownership directly into AqlValue - auto buffer = irs::memory::make_unique>(); - - if (!buffer) { - irs::string_ref const message = "failure to allocate result buffer while " - "computing result for function 'TOKENS'"; - - LOG_TOPIC("97cd0", WARN, arangodb::iresearch::TOPIC) << message; - THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_OUT_OF_MEMORY, message); - } - + auto buffer = std::make_unique>(); arangodb::velocypack::Builder builder(*buffer); - builder.openArray(); - - while (analyzer->next()) { - auto value = irs::ref_cast(values->value()); - - 
arangodb::iresearch::addStringRef(builder, value); - } + std::vector arrayIteratorStack; + auto current = args[0].slice(); + do { + // stack opening non-empty arrays + while (current.isArray() && !current.isEmptyArray()) { + arrayIteratorStack.emplace_back(current); + builder.openArray(); + current = arrayIteratorStack.back().value(); + } + // process current item + switch (current.type()) { + case VPackValueType::String: + if (!string_analyzer->reset(arangodb::iresearch::getStringRef(current))) { + auto const message = "failure to reset arangosearch analyzer: ' "s + + static_cast(name) + + "' while computing result for function 'TOKENS'"; + LOG_TOPIC("45a2d", WARN, arangodb::iresearch::TOPIC) << message; + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message); + } + while (string_analyzer->next()) { + builder.add( + arangodb::iresearch::toValuePair(irs::ref_cast(string_terms->value()))); + } + break; + case VPackValueType::Bool: + builder.add( + arangodb::iresearch::toValuePair( + arangodb::basics::StringUtils::encodeBase64(irs::ref_cast( + irs::boolean_token_stream::value(current.getBoolean()))))); + break; + case VPackValueType::Null: + builder.add( + arangodb::iresearch::toValuePair( + arangodb::basics::StringUtils::encodeBase64( + irs::ref_cast(irs::null_token_stream::value_null())))); + break; + case VPackValueType::Array: // we get there only when empty array encountered + TRI_ASSERT(current.isEmptyArray()); + // empty array in = empty array out + builder.openArray(); + builder.close(); + break; + default: + if (current.isNumber()) { // there are many "number" types. To adopt all current and future ones just + // deal with them all here in generic way + if(!numeric_analyzer) { + numeric_analyzer = std::make_unique(); + numeric_terms = numeric_analyzer->attributes().get().get(); + if (ADB_UNLIKELY(!numeric_terms)) { + auto const message = + "failure to retrieve values from arangosearch numeric analyzer " + "while computing result for function 'TOKENS'"; + LOG_TOPIC("7d5df", WARN, arangodb::iresearch::TOPIC) << message; + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message); + } + } + // we read all numers as doubles because ArangoSearch indexes + // all numbers as doubles, so do we there, as out goal is to + // return same tokens as will be in index for this specific number + numeric_analyzer->reset(current.getNumber()); + while (numeric_analyzer->next()) { + builder.add( + arangodb::iresearch::toValuePair( + arangodb::basics::StringUtils::encodeBase64( + irs::ref_cast(numeric_terms->value())))); + } + } else { + auto const message = "unexpected parameter type '"s + current.typeName() + + "' while computing result for function 'TOKENS'"; + LOG_TOPIC("45a2e", WARN, arangodb::iresearch::TOPIC) << message; + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, message); + } + } + // de-stack all closing arrays + while (!arrayIteratorStack.empty()) { + auto& currentArrayIterator = arrayIteratorStack.back(); + if (!currentArrayIterator.isLast()) { + currentArrayIterator.next(); + current = currentArrayIterator.value(); + //next array for next item + builder.close(); + builder.openArray(); + break; + } else { + arrayIteratorStack.pop_back(); + builder.close(); + } + } + } while (!arrayIteratorStack.empty()); builder.close(); @@ -493,7 +572,7 @@ void addFunctions(arangodb::aql::AqlFunctionFeature& functions) { functions, arangodb::aql::Function{ "TOKENS", // name - ".,.", // positional arguments (data,analyzer) + ".|.", // positional arguments (data[,analyzer]) // 
deterministic (true == called during AST optimization and will be // used to calculate values for constant expressions) arangodb::aql::Function::makeFlags(arangodb::aql::Function::Flags::Deterministic, @@ -1591,7 +1670,7 @@ IResearchAnalyzerFeature::AnalyzerPool::ptr IResearchAnalyzerFeature::get( // fi Identity() { // find the 'identity' analyzer pool in the static analyzers auto& staticAnalyzers = getStaticAnalyzers(); - irs::string_ref name = "identity"; // hardcoded name of the identity analyzer pool + irs::string_ref name = IDENTITY_ANALYZER_NAME; // hardcoded name of the identity analyzer pool auto key = irs::make_hashed_ref(name, std::hash()); auto itr = staticAnalyzers.find(key); diff --git a/arangod/IResearch/VelocyPackHelper.cpp b/arangod/IResearch/VelocyPackHelper.cpp index 4d87821e4416..dc885b7d3261 100644 --- a/arangod/IResearch/VelocyPackHelper.cpp +++ b/arangod/IResearch/VelocyPackHelper.cpp @@ -27,21 +27,6 @@ #include "velocypack/Iterator.h" namespace { - -inline arangodb::velocypack::ValuePair toValuePair(irs::bytes_ref const& ref) { - TRI_ASSERT(!ref.null()); // consumers of ValuePair usually use memcpy(...) which cannot handle nullptr - return arangodb::velocypack::ValuePair( // value pair - ref.c_str(), ref.size(), arangodb::velocypack::ValueType::Binary // args - ); -} - -inline arangodb::velocypack::ValuePair toValuePair(irs::string_ref const& ref) { - TRI_ASSERT(!ref.null()); // consumers of ValuePair usually use memcpy(...) which cannot handle nullptr - return arangodb::velocypack::ValuePair( // value pair - ref.c_str(), ref.size(), arangodb::velocypack::ValueType::String // args - ); -} - template arangodb::velocypack::Builder& addRef( // add a value arangodb::velocypack::Builder& builder, // builder @@ -53,7 +38,7 @@ arangodb::velocypack::Builder& addRef( // add a value arangodb::velocypack::Value(arangodb::velocypack::ValueType::Null) // value ); } else { - builder.add(toValuePair(value)); + builder.add(arangodb::iresearch::toValuePair(value)); } return builder; @@ -75,12 +60,11 @@ arangodb::velocypack::Builder& addRef( // add a value arangodb::velocypack::Value(arangodb::velocypack::ValueType::Null) // value ); } else { - builder.add(key.c_str(), key.size(), toValuePair(value)); + builder.add(key.c_str(), key.size(), arangodb::iresearch::toValuePair(value)); } return builder; } - } namespace arangodb { @@ -236,4 +220,4 @@ ObjectIterator& ObjectIterator::operator++() { // ----------------------------------------------------------------------------- // --SECTION-- END-OF-FILE -// ----------------------------------------------------------------------------- \ No newline at end of file +// ----------------------------------------------------------------------------- diff --git a/arangod/IResearch/VelocyPackHelper.h b/arangod/IResearch/VelocyPackHelper.h index d53d656a8c21..db54347e6c98 100644 --- a/arangod/IResearch/VelocyPackHelper.h +++ b/arangod/IResearch/VelocyPackHelper.h @@ -97,6 +97,26 @@ arangodb::velocypack::Builder& addStringRef( // add a value irs::string_ref const& value // value ); +//////////////////////////////////////////////////////////////////////////////// +/// @brief wraps bytes ref with VPackValuePair +//////////////////////////////////////////////////////////////////////////////// +inline arangodb::velocypack::ValuePair toValuePair(irs::bytes_ref const& ref) { + TRI_ASSERT(!ref.null()); // consumers of ValuePair usually use memcpy(...) 
which cannot handle nullptr + return arangodb::velocypack::ValuePair( // value pair + ref.c_str(), ref.size(), arangodb::velocypack::ValueType::Binary // args + ); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief wraps string ref with VPackValuePair +//////////////////////////////////////////////////////////////////////////////// +inline arangodb::velocypack::ValuePair toValuePair(irs::string_ref const& ref) { + TRI_ASSERT(!ref.null()); // consumers of ValuePair usually use memcpy(...) which cannot handle nullptr + return arangodb::velocypack::ValuePair( // value pair + ref.c_str(), ref.size(), arangodb::velocypack::ValueType::String // args + ); +} + //////////////////////////////////////////////////////////////////////////////// /// @brief add a string_ref value to the 'builder' (for JSON objects) //////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/Pregel/README.md b/arangod/Pregel/README.md index 3856e4f244b8..61768d4beb5c 100644 --- a/arangod/Pregel/README.md +++ b/arangod/Pregel/README.md @@ -1,67 +1,86 @@ -![ArangoDB-Logo](https://docs.arangodb.com/assets/arangodb_logo_2016_inverted.png) +# Pregel Subsystem -Pregel Subsystem -======== - -The pregel subsystem implements a variety of different grapg algorithms, +The Pregel subsystem implements a variety of different graph algorithms, this readme is more intended for internal use. -#### Protocol +## Protocol Message format between DBServers: - - -{sender:"someid", -executionNumber:1337, -globalSuperstep:123, -messages: [, , vertexID2, ] +```json +{ + "sender": "someid", + "executionNumber": 1337, + "globalSuperstep": 123, + "messages": [, , , ] } -Any type of slice is supported +``` +Any type of slice is supported -### Useful Commands +## Useful Commands Import graph e.g. https://github.com/arangodb/example-datasets/tree/master/Graphs/1000 First rename the columns '_key', '_from', '_to' arangoimport will keep those. 
In arangosh: - db._create('vertices', {numberOfShards: 2}); - db._createEdgeCollection('alt_edges'); - db._createEdgeCollection('edges', {numberOfShards: 2, shardKeys:["_vertex"], distributeShardsLike:'vertices'}); +```js +db._create('vertices', {numberOfShards: 2}); +db._createEdgeCollection('alt_edges'); +db._createEdgeCollection('edges', {numberOfShards: 2, shardKeys:["_vertex"], distributeShardsLike:'vertices'}); +``` +``` arangoimport --file generated_vertices.csv --type csv --collection vertices --overwrite true --server.endpoint http+tcp://127.0.0.1:8530 +``` Or: -for(var i=0; i < 5000; i++) db.vertices.save({_key:i+""}); - -arangoimport --file generated_edges.csv --type csv --collection alt_edges --overwrite true --from-collection-prefix "vertices" --to-collection-prefix "vertices" --convert false --server.endpoint http+tcp://127.0.0.1:8530 +```js +for(var i=0; i < 5000; i++) db.vertices.save({_key:i+""}); +``` +``` +arangoimport --file generated_edges.csv --type csv --collection alt_edges --overwrite true --from-collection-prefix "vertices" --to-collection-prefix "vertices" --convert false --server.endpoint http+tcp://127.0.0.1:8530 +``` AQL script to copy edge collection into one with '_vertex': +```js FOR doc IN alt_edges -INSERT {_vertex:SUBSTRING(doc._from,FIND_FIRST(doc._from,"/")+1), -_from:doc._from, -_to:doc._to} IN edges + INSERT { + _vertex: SUBSTRING(doc._from, FIND_FIRST(doc._from,"/") + 1), + _from: doc._from, + _to: doc._to + } INTO edges LET values = ( - FOR s IN vertices + FOR s IN vertices RETURN s.result ) RETURN SUM(values) +``` +## AWK Scripts -# AWK Scripts +Make CSV file with ID’s unique: -Make CSV file with ID’s unique +``` cat edges.csv | tr '[:space:]' '[\n*]' | grep -v "^\s*$" | awk '!seen[$0]++' > vertices.csv +``` -Make CSV file with arango compatible edges +Make CSV file with ArangoDB compatible edges: +``` cat edges.csv | awk -F" " '{print "profiles/" $1 "\tprofiles/" $2 "\t" $1}' >> arango-edges.csv +``` +Import the vertex and edge CSV files: +``` arangoimport --file vertices.csv --type csv --collection twitter_v --overwrite true --convert false --server.endpoint http+tcp://127.0.0.1:8530 -c none +``` + +``` arangoimport --file arango-edges.csv --type csv --collection twitter_e --overwrite true --convert false --separator "\t" --server.endpoint http+tcp://127.0.0.1:8530 -c none +``` diff --git a/etc/arangodb3/arangod.conf.in b/etc/arangodb3/arangod.conf.in index ae3ffcf3e5ec..f5887c233f89 100644 --- a/etc/arangodb3/arangod.conf.in +++ b/etc/arangodb3/arangod.conf.in @@ -1,7 +1,7 @@ # ArangoDB configuration file # # Documentation: -# https://docs.arangodb.com/latest/Manual/Administration/Configuration/ +# https://www.arangodb.com/docs/stable/administration-configuration.html # [database] diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/templates/modalGraphTable.ejs b/js/apps/system/_admin/aardvark/APP/frontend/js/templates/modalGraphTable.ejs index 99118c1d675e..ec9ba419c93a 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/templates/modalGraphTable.ejs +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/templates/modalGraphTable.ejs @@ -48,7 +48,7 @@ -

Need help? Visit our Graph Documentation
+        Need help? Visit our Graph Documentation
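The Pregel README changes above stop at loading the data; as a rough arangosh sketch (not part of this patch), the snippet below shows how a Pregel run could then be started against the `vertices` and `edges` collections set up there. The algorithm name, the options, and the `resultField` value are assumptions based on the stock `@arangodb/pregel` module, not something this diff introduces.

```js
// Sketch only: start a PageRank job on the collections created above and
// poll its status. Algorithm name and options are illustrative assumptions.
var pregel = require("@arangodb/pregel");

var handle = pregel.start(
  "pagerank",                                                      // built-in algorithm
  { vertexCollections: ["vertices"], edgeCollections: ["edges"] }, // graph spec
  { maxGSS: 50, resultField: "result" }                            // write scores into `result`
);

pregel.status(handle);  // repeat until "state" reports "done"
```

With `resultField: "result"`, the `RETURN SUM(values)` query quoted in the README can be used afterwards as a sanity check that per-vertex results were actually written.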
    diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/templates/navigationView.ejs b/js/apps/system/_admin/aardvark/APP/frontend/js/templates/navigationView.ejs index 87047693966c..1187882dc00a 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/templates/navigationView.ejs +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/templates/navigationView.ejs @@ -75,11 +75,11 @@